//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffseted
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
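        // (Illustrative IR: "getelementptr %A, i64 0, i32 0" leaves the
        // address unchanged, while "getelementptr %A, i64 0, i32 1" moves it
        // past the first field.)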
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (CallSite CS = I) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is
/// only modified by a copy from a constant global.  If we can prove this, we
/// can replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
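  // For example, on a target where intptr_t is i64, "alloca i32, i8 %n" gets
  // its array size zero-extended to i64 first, so later folds see the cast
  // explicitly.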
  if (DL) {
    Type *IntPtrTy = DL->getIntPtrType(AI.getType());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      //
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that I is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = DL
                  ? DL->getIntPtrType(AI.getType())
                  : Type::getInt64Ty(AI.getContext());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = { NullIdx, NullIdx };
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }

  if (DL && AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
            DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
                                                        AI.getAlignment(),
                                                        DL, AT, &AI, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy) {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, Value *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    Value *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_nonnull:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard.
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // Fold away bit casts of the loaded value by loading the desired type.
  if (LI.hasOneUse())
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
      BC->replaceAllUsesWith(NewLoad);
      IC.EraseInstFromFunction(*BC);
      return &LI;
    }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, DL->getPrefTypeAlignment(LI.getType()),
                                 DL, AT, &LI, DT);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      DL->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitCast(AvailableVal, LI.getType()));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
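      // Illustrative IR for the transform (assuming both loads are safe to
      // speculate):
      //   %p = select i1 %c, i32* %a, i32* %b
      //   %v = load i32* %p
      // becomes
      //   %a.val = load i32* %a
      //   %b.val = load i32* %b
      //   %v = select i1 %c, i32 %a.val, i32 %b.val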
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return nullptr;
}

/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  Type *DestPTy = CI->getType()->getPointerElementType();
  PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (!SrcTy) return nullptr;

  Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return nullptr;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  /// on 32-bit hosts.
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return nullptr;

  // If the pointers point into different address spaces don't do the
  // transformation.
  if (SrcTy->getAddressSpace() != CI->getType()->getPointerAddressSpace())
    return nullptr;

  // If the pointers point to values of different sizes don't do the
  // transformation.
  if (!IC.getDataLayout() ||
      IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
      IC.getDataLayout()->getTypeSizeInBits(DestPTy))
    return nullptr;

  // If the pointers point to pointers to different address spaces don't do the
  // transformation. It is not safe to introduce an addrspacecast instruction
  // in this case since, depending on the target, addrspacecast may not be a
  // no-op cast.
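  // For example, storing an i8 addrspace(1)* value through a bitcast of an
  // i8 addrspace(2)** pointer would require addrspacecast'ing the stored
  // value, so we bail out.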
  if (SrcPTy->isPointerTy() && DestPTy->isPointerTy() &&
      SrcPTy->getPointerAddressSpace() != DestPTy->getPointerAddressSpace())
    return nullptr;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *NewCast;
  Instruction::CastOps opcode = Instruction::BitCast;
  Type* CastSrcTy = DestPTy;
  Type* CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (CastSrcTy->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // SIOp0 is a pointer to aggregate and this is a store to the first field,
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);

  Value *SIOp0 = SI.getOperand(0);
  NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                   SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Attempt to improve the alignment.
  if (DL) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
                                 DL, AT, &SI, DT);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      DL->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
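  // For example:
  //   %a = alloca i32
  //   store i32 42, i32* %a
  // where the store is the only use of %a; deleting the store leaves the
  // alloca trivially dead.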
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the value we're storing
    // is a load from the same pointer we're storing to, then *this* store is
    // dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
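  // If the two stores wrote different values, the merged successor will look
  // something like:
  //   %storemerge = phi i32 [ %v1, %StoreBB ], [ %v2, %OtherBB ]
  //   store i32 %storemerge, i32* %P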
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}