//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets an unoffseted
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If
        // it doesn't, it does.
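        // (Illustrative: a GEP such as
        // 'getelementptr [4 x i8], [4 x i8]* %p, i64 0, i64 0' does not move
        // the pointer, while any non-zero index does.)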
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove if the specified
/// alloca is only modified by a copy from a constant global, or null
/// otherwise. If we can prove this, we can replace any uses of the alloca
/// with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
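    // (Illustrative: an 'alloca i32, i64 1' has its array size operand
    // rewritten to the canonical 'i32 1' form here.)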
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array
      // allocation. This is helpful if the array size is a complicated
      // expression not used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier
        // already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
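///
/// Illustrative example (not from the original source): combining
///   %v = load i64, i64* %p
/// to the loaded type i8* yields
///   %0 = bitcast i64* %p to i8**
///   %v = load i8*, i8** %0
/// with the original alignment, ordering, and applicable metadata carried
/// over.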
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard. If the new type is a pointer, we could
      // translate it to !nonnull metadata.
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
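///
/// Illustrative example (not from the original source): rewriting
///   store i64 %x, i64* %p
/// to instead store a double %d produces
///   %0 = bitcast i64* %p to double*
///   store double %d, double* %0
/// with alignment, ordering, and applicable metadata preserved.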
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type.
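  // (Illustrative: a 'load float' whose only users are stores of that value
  // can be rewritten to load and store i32 on targets where i32 is legal.)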
  // We only do this when the loaded type is sized and has a size exactly the
  // same as its store size and the store size is a legal integer type.
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI;
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer
  // types, as long as those are noops (i.e., the source or dest type has the
  // same bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back())) {
      if (CI->isNoopCast(DL)) {
        LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
        CI->replaceAllUsesWith(NewLoad);
        IC.eraseInstFromFunction(*CI);
        return &LI;
      }
    }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}

static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
                                            UndefValue::get(T), NewLoad, 0,
                                            Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
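    // (Illustrative: a struct such as '{ i8, i32 }' is normally laid out with
    // three bytes of padding after the i8; splitting the load per field would
    // drop that fact.)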
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr,
                                                makeArrayRef(Indices),
                                                Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
                                            UndefValue::get(T), NewLoad, 0,
                                            Name));
    }

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr,
                                                makeArrayRef(Indices),
                                                Name + ".elt");
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                              Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. (Constant globals and fixed-size allocas are the objects whose size
// we can determine here.)
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
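    // (The only objects we can bound below are fixed-size allocas and
    // constant globals with a definitive initializer; anything else falls
    // through to the 'return false' at the end of the loop body.)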
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128) * APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
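  // (Illustrative: with a negative trailing index, a non-zero value for the
  // variable index could still land inside the object, so replacing it with
  // zero would not be justified.)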
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
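  // (Illustrative:
  //   store i32 %x, i32* %p
  //   %a = add i32 %y, 1
  //   %v = load i32, i32* %p
  // forwards %x to %v.)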
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal =
          FindAvailableLoadedValue(&LI, LI.getParent(), BBI,
                                   DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE) {
      LoadInst *NLI = cast<LoadInst>(AvailableVal);
      unsigned KnownIDs[] = {
          LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
          LLVMContext::MD_noalias,         LLVMContext::MD_range,
          LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
          LLVMContext::MD_invariant_group, LLVMContext::MD_align,
          LLVMContext::MD_dereferenceable,
          LLVMContext::MD_dereferenceable_or_null};
      combineMetadata(NLI, &LI, KnownIDs);
    }

    return replaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) &&
        GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable. We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Look for extractelement/insertvalue sequence that acts like a
/// bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change.
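/// For example (illustrative, not from the original source), given
///   %c = bitcast double %d to i64
///   store i64 %c, i64* %p
/// we prefer to store %d directly through a double* cast of %p: same width,
/// same single store.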
/// This combine is expected to be a semantic no-op which just allows stores to
/// more closely model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  if (Value *U = likeBitCastFromVector(IC, V)) {
    combineStoreToNewValue(IC, SI, U);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores of structs with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr,
                                                makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
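    // (Illustrative: a 'store [1 x i32] %v, [1 x i32]* %p' becomes a store of
    // 'extractvalue [1 x i32] %v, 0' through an i32* cast of %p.)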
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr,
                                                makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is the
    // value we're storing and it was loaded from the pointer we're storing to,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U); // Dropped a use.
    }
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr; // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of
    // the destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}