//===- InstCombineLoadStoreAlloca.cpp -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses. If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer is a pointer to a
/// constant global, we can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
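        // Casts don't move the pointer, so the IsOffset flag is propagated
        // unchanged.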
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just
        // a load (but one that potentially returns the value itself), so we
        // can ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we have already seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove that copies
/// from a constant global if the specified alloca is only modified by such a
/// copy; otherwise return null. If we can prove this, we can replace any uses
/// of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
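/// Array allocations are rejected because the getTypeStoreSize() query below
/// only accounts for a single element of the allocated type.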
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible; also skip interleaved debug info.
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It is pointing to the first non-allocation-inst in the
      // block, insert our getelementptr instruction.
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP =
          GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees a bitcast or GEP, it will
// create a new bitcast or GEP with the new pointer and use them in the load
// instruction.
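//
// Any user other than a load, GEP, or bitcast stops the chase early; see
// findLoadAndReplace().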
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array
      // allocation.
      // This is helpful if the array size is a complicated expression not
      // used elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was already forced to a constant.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the
        // entry block after ensuring that the address will be aligned enough
        // for both types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global whose alignment is equal to or exceeds that of
    // the allocation. If this is the case, we can change all users to use the
    // constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
    // 'A' is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
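// Integer, pointer, and floating-point types qualify; aggregate and vector
// types do not.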
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be
/// the loaded *value* type. This will convert it to a pointer, cast the
/// operand to that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
      NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
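///
/// As with combineLoadToNewType, instructions are created at the \c
/// InstCombiner's current insert point, and only metadata that remains valid
/// for the new value type is carried over.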
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
static bool isMinMaxWithLoads(Value *V) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an
/// operation, we should load the type most conducive to that operation. For
/// example, when loading an integer and converting that immediately to a
/// pointer, we should instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and ordered
  // atomic loads here, but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if the minmax pattern is found (to avoid
  // an infinite loop).
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true))) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer
  // types, as long as those are noops (i.e., the source or dest type have the
  // same bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}

static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // loads here, but it isn't clear that this is important.
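  // isSimple() rules out volatile and atomic loads, so only plain loads are
  // unpacked below.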
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile
    // time. The threshold here is chosen arbitrarily, and may need a little
    // bit of tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are not only dereferenceable but also definitively less than
// or equal to the provided maximum size, then return true; otherwise, return
// false. Constant global values and allocas with constant sizes are examples
// of objects whose size can be determined this way.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128) * APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero,
  // make sure they're all non-negative. If any of them are negative, the
  // overall address being computed might be before the base address
  // determined by the first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx + 1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx + 1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer
  // is also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
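//
// MemI is the load or store whose pointer operand will be rewritten to the
// cloned GEP; the helper is templated so it works for both instruction kinds.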
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(),
                              GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these xforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
                                          SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
                                          SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// Look for extractelement/insertvalue sequences that act like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
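///
/// The loop below peels insertvalue instructions from the outside in,
/// checking that each inserted element was extracted from the same vector at
/// the matching index and that the chain bottoms out at undef.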
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// Combine stores to match the type of the value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type
/// and, where we can, we should match the type of a store to the type of the
/// value being stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// or volatile store. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.eraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and ordered
  // atomic stores here, but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
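  // For example:
  //   store (bitcast double %x to i64), i64* %p
  // becomes a store of double %x through a pointer cast of %p.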
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // stores here, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores into padded structs here, as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile
    // time. The threshold here is chosen arbitrarily, and may need a little
    // bit of tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  if (!isMinMaxWithLoads(LoadAddr))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = combineLoadToNewType(
      IC, *LI, LoadAddr->getType()->getPointerElementType());
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is
    // from the pointer we're loading and is producing the pointer we're
    // storing, then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null         -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U); // Dropped a use.
    }
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the second-to-last instruction in the basic block
  // (excluding debug info and bitcasts of pointers) and if the block ends
  // with an unconditional branch, try to move the store to the successor
  // block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));

  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      mergeStoreIntoSuccessor(SI);

  return nullptr;
}

/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "This code has not been audited for volatile or ordered store case.");

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if
  // then else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, or isn't a store to the same location, or isn't
    // the right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of
    // the destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to make sure
    // nothing reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge
  // them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert
  // it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(), SI.getAlignment(),
                                   SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}