//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, const TargetData &TD){
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the element size.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const TargetData &TD) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
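  // Note: Offset is Ptr2's position relative to Ptr1 (Ptr2 == Ptr1 + Offset).
  // In this first case Ptr1 is the GEP off of Ptr2, i.e. Ptr1 == Ptr2 +
  // gep-offset, so the GEP's offset is negated below.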
  if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they may have some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}


/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the
/// range to [0, 2).  The third makes a new range [3, 4).  The fourth store
/// joins the two ranges into [0, 4) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or at least 64 bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost
  // always.  However, merging 2 32-bit stores isn't useful on a 32-bit
  // architecture (the memset will be split into 2 32-bit stores anyway) and
  // doing so can pessimize the llvm optimizer.
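  // (To make the arithmetic below concrete: with a 4-byte pointer size, seven
  // adjacent i8 stores count as one pointer-sized store plus three byte
  // stores, so 7 > 4 and we merge; three adjacent i32 stores count as three
  // pointer-sized stores, so 3 > 3 fails and we leave them alone.)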
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const TargetData &TD;
public:
  MemsetRanges(const TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anon namespace


/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list.  We keep the ranges sorted for
/// simplicity here.  This is a linear search of a linked list, which is ugly,
/// however the number of ranges is limited, so this won't get crazy slow.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start = Start;
    R.End = End;
    R.StartPtr = Ptr;
    R.Alignment = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out
  // to End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                            MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
class MemCpyOpt : public FunctionPass {
  MemoryDependenceAnalysis *MD;
  const TargetData *TD;
public:
  static char ID; // Pass identification, replacement for typeid
  MemCpyOpt() : FunctionPass(ID) {
    initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
    MD = 0;
  }

  bool runOnFunction(Function &F);

private:
  // This transformation requires dominator info.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTree>();
    AU.addRequired<MemoryDependenceAnalysis>();
    AU.addRequired<AliasAnalysis>();
    AU.addPreserved<AliasAnalysis>();
    AU.addPreserved<MemoryDependenceAnalysis>();
  }

  // Helper functions
  bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
  bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
  bool processMemCpy(MemCpyInst *M);
  bool processMemMove(MemMoveInst *M);
  bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                            uint64_t cpyLen, CallInst *C);
  bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                     uint64_t MSize);
  bool processByValArgument(CallSite CS, unsigned ArgNo);
  Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                    Value *ByteVal);

  bool iterateOnFunction(Function &F);
};

char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  // Okay, so we now have a single store that can be splatted.  Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous
  // blocks are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (NextStore->isVolatile()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                           Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    // Zap all the stores.
    for (SmallVector<Instruction*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}


bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  if (TD == 0) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse()) {
      MemDepResult dep = MD->getDependency(LI);
      CallInst *C = 0;
      if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
        C = dyn_cast<CallInst>(dep.getInst());

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }
  return false;
}


/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
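  //
  // (In practice we never move the copy at all: we rewrite the call so it
  // writes directly into dest; the copy then only moves uninitialized bytes
  // and is deleted by our caller.)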
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check that it is at least as large as
    // srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
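  // (For example, if dest were computed by an instruction that only appears
  // after the call, we could not legally refer to it from the call site.)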
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If the dep instruction is reading from our current input, then it is a
  // noop transfer and substituting the input won't change this instruction.
  // Just ignore the input and let someone else zap MDep.  This handles cases
  // like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
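  // Ask MemDep for the instruction that the source bytes depend on, scanning
  // backwards from M; the transform is only safe if that instruction is MDep
  // itself, i.e. nothing in between touched the source.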
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the
  // intermediate value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}


/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpy's that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
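  // Both forms key off the instruction this memcpy depends on, so query
  // MemDep for it first and then dispatch on what we find.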
  MemDepResult DepInfo = MD->getDependency(M);
  if (!DepInfo.isClobber())
    return false;

  if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
    return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());

  if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
    if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                             CopySize->getZExtValue(), C)) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      return true;
    }
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // See if the pointers alias.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  if (TD == 0) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  const Type *ByValTy =cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If it is greater than the memcpy's
  // alignment, then we can't do the substitution.  If the call doesn't specify
  // the alignment, then it is some target-specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0 || MDep->getAlignment() < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform this into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
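  // As in processMemCpyMemCpyDependence, require that the only instruction the
  // source bytes depend on between the memcpy and the call is the memcpy
  // itself.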
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.paramHasAttr(i+1, Attribute::ByVal))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TD = getAnalysisIfAvailable<TargetData>();
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}