//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
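        // A cast does not move the pointer, so IsOffset is propagated
        // unchanged.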
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the alloca is the source of the transfer, then ignore it since it
      // is only a read (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we have already seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for size of alloca.
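/// This is used when replacing an alloca that is only initialized by a copy
/// from a constant global: the copy source must be dereferenceable (and
/// sufficiently aligned) for the entire alloca before the replacement is done.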
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder.getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(MaybeAlign(AI.getAlignment()));

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible... also skip interleaved debug info.
      //
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      //
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
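//
// It is used by visitAllocaInst when an alloca that is only initialized by a
// copy from a constant global lives in a different address space than the
// global: loads through the alloca are rewritten to load from the global
// directly.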
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(I->getType(), V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(
          MaybeAlign(DL.getPrefTypeAlignment(AI.getAllocatedType())));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was already forced to a constant.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              MaybeAlign(DL.getPrefTypeAlignment(EntryAI->getAllocatedType())));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        const MaybeAlign MaxAlign(
            std::max(EntryAI->getAlignment(), AI.getAlignment()));
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // Finally, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
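// This restricts the type-canonicalization combines below: an atomic load or
// store is only rewritten when the new type is an integer, pointer, or
// floating-point type.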
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
LoadInst *InstCombiner::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                             const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  // If the old load did not have an explicit alignment specified, manually
  // preserve the implied (ABI) alignment of the load. Otherwise we may
  // inadvertently over-promise alignment.
  const auto Align =
      getDataLayout().getValueOrABITypeAlignment(LI.getAlign(), LI.getType());

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, Align, LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlign(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern like:
///   select ((cmp load V1, load V2), V1, V2).
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  LoadTy = L1->getType();
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  // Do not perform canonicalization if minmax pattern is found (to avoid
  // infinite loop).
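  // For example (illustrative IR, assuming i32 is a legal integer type): a
  // float that is only ever loaded and then stored,
  //   %v = load float, float* %p
  //   store float %v, float* %q
  // is rewritten to move the bits through an i32 of the same store size.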
  Type *Dummy;
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      !(Ty->isVectorTy() && Ty->getVectorIsScalable()) &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.typeSizeEqualsStoreSize(Ty) &&
      !DL.isNonIntegralPointerType(Ty) &&
      !isMinMaxWithLoads(
          peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true),
          Dummy)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = IC.combineLoadToNewType(
          LI, Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder.SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = IC.combineLoadToNewType(LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack it.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
                                                  ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
                                        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
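    // (For instance, a struct like { i8, i32 } typically has padding between
    // its fields; per-element loads would hide that from later passes.)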
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          ST->getElementType(i), Ptr,
          commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
                                        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
                                             commonAlignment(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are not only dereferenceable but also definitively less than
// or equal to the provided maximum size, then return true; otherwise, return
// false. (Constant global values and allocas are the only object kinds whose
// sizes we can determine here.)
//
// FIXME: This should probably live in ValueTracking (or similar).
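//
// The worklist walk below looks through selects, PHIs and non-interposable
// aliases, and only accepts fixed-size allocas and constant globals with
// definitive initializers whose total size is known to be <= MaxSize.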
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128) * APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx + 1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx + 1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
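//
// T is the memory access being combined (a LoadInst or StoreInst in the
// callers below); its pointer operand is rewritten to use the new GEP.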
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(MaybeAlign(KnownAlign));
  else if (LoadAlign == 0)
    LI.setAlignment(MaybeAlign(EffectiveLoadAlign));

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these transforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      const MaybeAlign Alignment(LI.getAlignment());
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace())) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and,
/// where we can, we should match the type of a store to the type of the value
/// being stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.eraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
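  // For example (illustrative IR):
  //   %b = bitcast float %f to i32
  //   store i32 %b, i32* %p
  // becomes a store of %f through a cast of %p to float*.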
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack it.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    const auto Align = DL.getValueOrABITypeAlignment(SI.getAlign(), ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, we unpack it.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = DL.getValueOrABITypeAlignment(SI.getAlign(), T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where select is minmax:
/// select ((cmp load V1, load V2), V1, V2).
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  Type *CmpLoadTy;
  if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
    return false;

  // Make sure we're not changing the size of the load/store.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               peekThroughBitcast(SI->getPointerOperand()) != LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  const Align KnownAlign = Align(getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT));
  const MaybeAlign StoreAlign = MaybeAlign(SI.getAlignment());
  const Align EffectiveStoreAlign =
      StoreAlign ? *StoreAlign : Align(DL.getABITypeAlignment(Val->getType()));

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (!StoreAlign)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // If we have a store to a location which is known constant, we can conclude
  // that the store must be storing the constant value (else the memory
  // wouldn't be constant), and this must be a noop.
  if (AA->pointsToConstantMemory(Ptr))
    return eraseInstFromFunction(SI);

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        // Manually add back the original store to the worklist now, so it will
        // be processed after the operands of the removed store, as this may
        // expose additional DSE opportunities.
        Worklist.Add(&SI);
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is from
    // the pointer we're storing to and is producing the value we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null        -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U); // Dropped a use.
    }
    return nullptr; // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the second-to-last instruction in the basic block
  // (excluding debug info and bitcasts of pointers) and if the block ends with
  // an unconditional branch, try to move the store to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));

  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      mergeStoreIntoSuccessor(SI);

  return nullptr;
}

/// Try to transform:
///   if () { *P = v1; } else { *P = v2 }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
bool InstCombiner::mergeStoreIntoSuccessor(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "This code has not been audited for volatile or ordered store case.");

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if the relevant blocks aren't distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(),
                                   MaybeAlign(SI.getAlignment()),
                                   SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}