//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant byte offset, and return that constant offset.  For example, Ptr1
/// might be &A[42] and Ptr2 might be &A[40]; with 4-byte array elements the
/// offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
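  // (In that example, with Ptr1 = "P" and Ptr2 = "gep P, 1", the offset
  // returned would be one element size in bytes, since Offset measures Ptr2's
  // distance from Ptr1.)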
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case where Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common (and
  // potentially variable) indices.  Beyond those, any remaining indices must
  // be constant, and those constants determine the offset of the two pointers
  // from each other.  We handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the
/// range to [0, 2).  The third makes a new range [3, 4).  The fourth store
/// joins the two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range describing the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or 16 or more bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always.  However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the LLVM optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSize();
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes, if any, are stored a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores * MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anon namespace


/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
      [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that either I == E (there is nothing to merge with) or that
  // Start <= I->End.  If I == E or End < I->Start, we need to insert a new
  // range; handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start, so the store
  // overlaps the range but is not entirely contained within it.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I) and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = nullptr;
      TLI = nullptr;
    }

    bool runOnFunction(Function &F) override;

  private:
    // This transformation requires dominator info.
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDest, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
    bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
    bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store of a byte-splattable value.  Scan to
  // find all subsequent stores of the same value at constant offsets from the
  // same pointer.  Join these together into ranges, so we can decide whether
  // contiguous blocks are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable
      // value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
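  // At this point BI refers to the first instruction that could not be folded
  // into the memset run (or to the block terminator), which is where the new
  // intrinsic will be inserted.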
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memsets for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start,
                                   Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVectorImpl<Instruction *>::const_iterator
             SI = Range.TheStores.begin(),
             SE = Range.TheStores.end();
         SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}


bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;
  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
                                  E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = DL.getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.
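  //
  // As an illustrative example (not taken from a particular test case), a run
  // of adjacent byte stores such as:
  //   store i8 0, i8* %p
  //   store i8 0, i8* %p1    ; %p1 = getelementptr i8, i8* %p, i64 1
  //   store i8 0, i8* %p2    ; %p2 = getelementptr i8, i8* %p, i64 2
  //   store i8 0, i8* %p3    ; %p3 = getelementptr i8, i8* %p, i64 3
  // becomes a single intrinsic call:
  //   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4, i32 1, i1 false)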

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time: a value like "0" or "-1" of any width, as well as
  // patterns like 0xA0A0A0A0 and 0.0 that splat a single repeated byte.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// performCallSlotOptzn - Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning
  // that the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning
  // considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
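      // (An sret argument is guaranteed by the caller to point to valid
      // storage for the whole return type, so accesses that stay within that
      // type cannot trap.)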
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the
  // final memcpy can be dropped), that it is not read or written between the
  // call and the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
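  // Rewrite each use of cpySrc in the call's argument list to refer to cpyDest
  // instead.  Two pointer casts may be needed: the arguments were compared
  // with pointer casts stripped, so cpyDest is first cast to cpySrc's type,
  // and then to the actual (unstripped) argument type if that still differs.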
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
          : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                        cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                              CS.getArgument(i)->getType(), Dest->getName(),
                              C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its
  // alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {
    LLVMContext::MD_tbaa,
    LLVMContext::MD_alias_scope,
    LLVMContext::MD_noalias,
  };
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M,
                                              MemCpyInst *MDep) {
  // We can only transform memcpys where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep.  This handles cases
  // like:
  //   memcpy(a <- a)
  //   memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the lengths of the memcpys must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //   memcpy(a <- b)
  //   *b = 42;
  //   memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination
  // "c", then we could still perform the xform by moving M up to the first
  // memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
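  //
  // Concretely, the query below scans backward from M for the first access
  // (read or write) to MDep's source location; only if that access is MDep
  // itself is the forwarding safe.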
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the
  // intermediate value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(),
                          M->getLength(), Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c,
///          dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                              MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo = MD->getPointerDependencyFrom(
      MemoryLocation::getForDest(MemSet), false, MemCpy, MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned and SrcSize is constant, the new memset starts at
  // Dest + SrcSize, so use the known alignment of that sum.
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
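  // The trailing memset then covers max(DestSize - SrcSize, 0) bytes, computed
  // with a select below.  For example, with (hypothetical) dst_size = 16 and
  // src_size = 8, the rewrite produces memset(dst + 8, c, 8); when src_size >=
  // dst_size the new length is zero and the memset is a no-op.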
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *MemsetLen =
      Builder.CreateSelect(Builder.CreateICmpULE(DestSize, SrcSize),
                           ConstantInt::getNullValue(DestSize->getType()),
                           Builder.CreateSub(DestSize, SrcSize));
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a constant length.
bool MemCpyOpt::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                           MemSetInst *MemSet) {
  // This only makes sense on memcpy(..., memset(...), ...).
  if (MemSet->getRawDest() != MemCpy->getRawSource())
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}

/// processMemCpy - Perform simplification of memcpys.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpys.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset.  We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started
  //      its lifetime copies undefined data, and we can therefore eliminate
  //      the memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
                                                         M, M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // The pointers are provably non-aliasing, so we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call
/// sites.  If the argument is fed by a memcpy, try to use the source of the
/// memcpy directly, making the memcpy dead.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true, CS.getInstruction(),
      CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of
  // the result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the
  // alignment, then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy's alignment, then we check to see if we
  // can force the source of the memcpy to the alignment we need.  If we fail,
  // we bail out.
  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
          *CS->getParent()->getParent());
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy
  // and the byval call.
  //   memcpy(a <- b)
  //   *b = 42;
  //   foo(*a)
  // It would be invalid to transform the call into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
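      // (Processing may erase the current instruction or insert new ones, so
      // BI is advanced first and the processXXX helpers update it as needed.)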
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for
// a function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // If we don't have at least memset and memcpy, there is little point in
  // doing anything here.  These are required by a freestanding
  // implementation, so if even they are disabled, there is no point in
  // trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}