//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses. If we see a memcpy/memmove that targets an unoffseted
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads, they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If
        // it doesn't, it does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (CS.isCallee(&U))
          continue;

        unsigned DataOpNo = CS.getDataOperandNo(&U);
        bool IsArgOperand = CS.isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && CS.isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove if the specified
/// alloca is only modified by a copy from a constant global. If we can prove
/// this, we can replace any uses of the alloca with uses of the global
/// directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for size of alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, AI->getAlignment(),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
    //
    BasicBlock::iterator It(New);
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    //
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.replaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees bitcast or GEP, it will
// create new bitcast or GEP with the new pointer and use them in the load
// instruction.
class PointerReplacer {
public:
  PointerReplacer(InstCombiner &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombiner &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(V);
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all allocas of zero-byte objects to the entry block and merge them
    // together. Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
      if (AI.getAlignment() <= SourceAlign &&
          isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          eraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        auto *SrcTy = TheSrc->getType();
        auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                        SrcTy->getPointerAddressSpace());
        Constant *Cast =
            ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
        if (AI.getType()->getPointerAddressSpace() ==
            SrcTy->getPointerAddressSpace()) {
          Instruction *NewI = replaceInstUsesWith(AI, Cast);
          eraseInstFromFunction(*Copy);
          ++NumGlobalCopies;
          return NewI;
        } else {
          PointerReplacer PtrReplacer(*this);
          PtrReplacer.replacePointer(AI, Cast);
          ++NumGlobalCopies;
        }
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntegerTy() || Ty->isPointerTy() || Ty->isFloatingPointTy();
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
                                      const Twine &Suffix = "") {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      copyNonnullMetadata(LI, N, *NewLoad);
      break;
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These only directly apply if the new type is also a pointer.
      if (NewTy->isPointerTy())
        NewLoad->setMetadata(ID, N);
      break;
    case LLVMContext::MD_range:
      copyRangeMetadata(IC.getDataLayout(), LI, N, *NewLoad);
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
      !DL.isNonIntegralPointerType(Ty)) {
    if (all_of(LI.users(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI &&
                 !SI->getPointerOperand()->isSwiftError();
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.eraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type have the same
  // bitwidth as the target's pointers).
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL))
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
          UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
      return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
          UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    auto Align = LI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
      auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
                                              Name + ".unpack");
      V = IC.Builder->CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (only constant globals and allocas have sizes we can determine here).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//   ...
//   %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
//   ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
      GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0)
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0))
    return true;
  return false;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);

    return replaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads. Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these transforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable. We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Align);
        V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
        V2->setAlignment(Align);
        V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///   %E0 = extractelement <2 x double> %U, i32 0
///   %V0 = insertvalue [2 x double] undef, double %E0, 0
///   %E1 = extractelement <2 x double> %U, i32 1
///   %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != UT->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != UT->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and,
/// where we can, we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(ST);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    auto Align = SI.getAlignment();
    if (!Align)
      Align = DL.getABITypeAlignment(T);

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                AddrName);
      auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
      auto EltAlign = MinAlign(Align, Offset);
      IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return eraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return eraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/ordered stores.
  // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
  if (!SI.isUnordered()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return eraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return eraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI(SI);
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        eraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the value being stored
    // was loaded from the very pointer we're storing to, then *this* store is
    // dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = SI.getIterator();
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");

  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  // The debug locations of the original instructions might differ; merge them.
  NewSI->setDebugLoc(DILocation::getMergedLocation(SI.getDebugLoc(),
                                                   OtherStore->getDebugLoc()));

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}