//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses.  If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove whose destination is the
/// alloca itself (at zero offset), and whose source points to a constant
/// global, we can optimize this.
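///
/// A minimal sketch of the pattern being recognized (illustrative IR; the
/// names and types are hypothetical):
///   @g = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   %a = alloca [4 x i32]
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.i8, i8* %g.i8, i64 16,
///                                        i1 false)
///   ; ... only reads of %a follow ...
/// If every other use of %a is a read, loads of %a can be redirected to @g.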
static bool
isOnlyCopiedFromConstantMemory(AAResults *AA,
                               Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them.  If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.emplace_back(V, false);
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      auto *I = cast<Instruction>(U.getUser());

      if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Simple (non-volatile, non-atomic) loads are always ok; reject the
        // rest.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.emplace_back(I, IsOffset);
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // A GEP with all-zero indices doesn't move the pointer; any other GEP
        // does.
        ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
        continue;
      }

      if (auto *Call = dyn_cast<CallBase>(I)) {
        // If this is the function being called then we treat it like a load
        // and ignore it.
        if (Call->isCallee(&U))
          continue;

        unsigned DataOpNo = Call->getDataOperandNo(&U);
        bool IsArgOperand = Call->isArgOperand(&U);

        // Inalloca arguments are clobbered by the call.
        if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (Call->onlyReadsMemory() &&
            (Call->use_empty() || Call->doesNotCapture(DataOpNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (IsArgOperand && Call->isByValArgument(DataOpNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (I->isLifetimeStartOrEnd()) {
        assert(I->use_empty() && "Lifetime markers have no result to use!");
        ToDelete.push_back(I);
        continue;
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we have already seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not constant memory, reject it.
      if (!AA->pointsToConstantMemory(MI->getSource()))
        return false;

      // Otherwise, the transform is safe.  Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantMemory - Return the memcpy/memmove if the specified
/// alloca is only modified by a single copy from constant memory; otherwise
/// return null.  If we can prove this, we can replace any uses of the alloca
/// with uses of the source memory directly.
static MemTransferInst *
isOnlyCopiedFromConstantMemory(AAResults *AA,
                               AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {
  if (AI->isArrayAllocation())
    return false;
  uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
  if (!AllocaSize)
    return false;
  return isDereferenceableAndAlignedPointer(V, Align(AI->getAlignment()),
                                            APInt(64, AllocaSize), DL);
}

static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
                                            AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    if (C->getValue().getActiveBits() <= 64) {
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
      New->setAlignment(AI.getAlign());

      // Scan to the end of the allocation instructions, to skip over a block
      // of allocas if possible; also skip interleaved debug info.
      BasicBlock::iterator It(New);
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
        ++It;

      // Now that It points to the first non-allocation instruction in the
      // block, insert our getelementptr instruction.
      Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
      Value *NullIdx = Constant::getNullValue(IdxTy);
      Value *Idx[2] = {NullIdx, NullIdx};
      Instruction *GEP = GetElementPtrInst::CreateInBounds(
          NewTy, New, Idx, New->getName() + ".sub");
      IC.InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return IC.replaceInstUsesWith(AI, GEP);
    }
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    return IC.replaceOperand(AI, 0, V);
  }

  return nullptr;
}

namespace {
// If I and V are pointers in different address spaces, it is not legal to use
// replaceAllUsesWith, since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V either,
// since the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees a bitcast or GEP, it will
// create a new bitcast or GEP with the new pointer and use them in the load
// instruction.
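//
// For example (illustrative IR; the address spaces and names are
// hypothetical), when %new in addrspace(4) replaces %old in addrspace(0):
//   %b = bitcast i8* %old to i32*
//   %v = load i32, i32* %b
// becomes
//   %b1 = bitcast i8 addrspace(4)* %new to i32 addrspace(4)*
//   %v = load i32, i32 addrspace(4)* %b1
// so that no addrspacecast is ever introduced.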
class PointerReplacer {
public:
  PointerReplacer(InstCombinerImpl &IC) : IC(IC) {}
  void replacePointer(Instruction &I, Value *V);

private:
  void findLoadAndReplace(Instruction &I);
  void replace(Instruction *I);
  Value *getReplacement(Value *I);

  SmallVector<Instruction *, 4> Path;
  MapVector<Value *, Value *> WorkMap;
  InstCombinerImpl &IC;
};
} // end anonymous namespace

void PointerReplacer::findLoadAndReplace(Instruction &I) {
  for (auto U : I.users()) {
    auto *Inst = dyn_cast<Instruction>(&*U);
    if (!Inst)
      return;
    LLVM_DEBUG(dbgs() << "Found pointer user: " << *U << '\n');
    if (isa<LoadInst>(Inst)) {
      for (auto P : Path)
        replace(P);
      replace(Inst);
    } else if (isa<GetElementPtrInst>(Inst) || isa<BitCastInst>(Inst)) {
      Path.push_back(Inst);
      findLoadAndReplace(*Inst);
      Path.pop_back();
    } else {
      return;
    }
  }
}

Value *PointerReplacer::getReplacement(Value *V) {
  auto Loc = WorkMap.find(V);
  if (Loc != WorkMap.end())
    return Loc->second;
  return nullptr;
}

void PointerReplacer::replace(Instruction *I) {
  if (getReplacement(I))
    return;

  if (auto *LT = dyn_cast<LoadInst>(I)) {
    auto *V = getReplacement(LT->getPointerOperand());
    assert(V && "Operand not replaced");
    auto *NewI = new LoadInst(I->getType(), V, "", false,
                              IC.getDataLayout().getABITypeAlign(I->getType()));
    NewI->takeName(LT);
    IC.InsertNewInstWith(NewI, *LT);
    IC.replaceInstUsesWith(*LT, NewI);
    WorkMap[LT] = NewI;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    auto *V = getReplacement(GEP->getPointerOperand());
    assert(V && "Operand not replaced");
    SmallVector<Value *, 8> Indices;
    Indices.append(GEP->idx_begin(), GEP->idx_end());
    auto *NewI = GetElementPtrInst::Create(
        V->getType()->getPointerElementType(), V, Indices);
    IC.InsertNewInstWith(NewI, *GEP);
    NewI->takeName(GEP);
    WorkMap[GEP] = NewI;
  } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
    auto *V = getReplacement(BC->getOperand(0));
    assert(V && "Operand not replaced");
    auto *NewT = PointerType::get(BC->getType()->getPointerElementType(),
                                  V->getType()->getPointerAddressSpace());
    auto *NewI = new BitCastInst(V, NewT);
    IC.InsertNewInstWith(NewI, *BC);
    NewI->takeName(BC);
    WorkMap[BC] = NewI;
  } else {
    llvm_unreachable("should never reach here");
  }
}

void PointerReplacer::replacePointer(Instruction &I, Value *V) {
#ifndef NDEBUG
  auto *PT = cast<PointerType>(I.getType());
  auto *NT = cast<PointerType>(V->getType());
  assert(PT != NT && PT->getElementType() == NT->getElementType() &&
         "Invalid usage");
#endif
  WorkMap[&I] = V;
  findLoadAndReplace(I);
}

Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinSize() == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation())
        return replaceOperand(AI, 0,
            ConstantInt::get(AI.getArraySize()->getType(), 1));

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType())
                    .getKnownMinSize() != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return replaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  // Check to see if this allocation is only modified by a memcpy/memmove from
  // a constant whose alignment is equal to or exceeds that of the allocation.
  // If this is the case, we can change all users to use the constant global
  // instead.  This is commonly produced by the CFE by constructs like "void
  // foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' is only subsequently
  // read.
  SmallVector<Instruction *, 4> ToDelete;
  if (MemTransferInst *Copy =
          isOnlyCopiedFromConstantMemory(AA, &AI, ToDelete)) {
    Align AllocaAlign = AI.getAlign();
    Align SourceAlign = getOrEnforceKnownAlignment(
        Copy->getSource(), AllocaAlign, DL, &AI, &AC, &DT);
    if (AllocaAlign <= SourceAlign &&
        isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) {
      LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
      LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
      for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
        eraseInstFromFunction(*ToDelete[i]);
      Value *TheSrc = Copy->getSource();
      auto *SrcTy = TheSrc->getType();
      auto *DestTy = PointerType::get(AI.getType()->getPointerElementType(),
                                      SrcTy->getPointerAddressSpace());
      Value *Cast =
        Builder.CreatePointerBitCastOrAddrSpaceCast(TheSrc, DestTy);
      if (AI.getType()->getPointerAddressSpace() ==
          SrcTy->getPointerAddressSpace()) {
        Instruction *NewI = replaceInstUsesWith(AI, Cast);
        eraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }

      PointerReplacer PtrReplacer(*this);
      PtrReplacer.replacePointer(AI, Cast);
      ++NumGlobalCopies;
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // unused allocas.
  return visitAllocSite(AI);
}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {
  return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombinerImpl currently is using.
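///
/// For example, retyping a load as double (illustrative IR; %p and the types
/// are hypothetical):
///   %v = load i64, i64* %p
/// becomes
///   %p.cast = bitcast i64* %p to double*
///   %v = load double, double* %p.cast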
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {
  assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
         "can't fold an atomic load to requested type");

  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  Value *NewPtr = nullptr;
  if (!(match(Ptr, m_BitCast(m_Value(NewPtr))) &&
        NewPtr->getType()->getPointerElementType() == NewTy &&
        NewPtr->getType()->getPointerAddressSpace() == AS))
    NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));

  LoadInst *NewLoad = Builder.CreateAlignedLoad(
      NewTy, NewPtr, LI.getAlign(), LI.isVolatile(), LI.getName() + Suffix);
  NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
  copyMetadataForLoad(*NewLoad, LI);
  return NewLoad;
}

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
                                         Value *V) {
  assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
         "can't fold an atomic store of requested type");

  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder.CreateAlignedStore(
      V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlign(), SI.isVolatile());
  NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
    case LLVMContext::MD_access_group:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
    case LLVMContext::MD_align:
    case LLVMContext::MD_dereferenceable:
    case LLVMContext::MD_dereferenceable_or_null:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// Returns true if the instruction represents a minmax pattern of the form:
///   select ((cmp load V1, load V2), V1, V2).
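///
/// A concrete instance (illustrative IR; names are hypothetical):
///   %a = load i32, i32* %p1
///   %b = load i32, i32* %p2
///   %cmp = icmp slt i32 %a, %b
///   %min = select i1 %cmp, i32* %p1, i32* %p2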
static bool isMinMaxWithLoads(Value *V, Type *&LoadTy) {
  assert(V->getType()->isPointerTy() && "Expected pointer type.");
  // Ignore possible ty* to ixx* bitcast.
  V = InstCombiner::peekThroughBitcast(V);
  // Check that select is select ((cmp load V1, load V2), V1, V2) - minmax
  // pattern.
  CmpInst::Predicate Pred;
  Instruction *L1;
  Instruction *L2;
  Value *LHS;
  Value *RHS;
  if (!match(V, m_Select(m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2)),
                         m_Value(LHS), m_Value(RHS))))
    return false;
  LoadTy = L1->getType();
  return (match(L1, m_Load(m_Specific(LHS))) &&
          match(L2, m_Load(m_Specific(RHS)))) ||
         (match(L1, m_Load(m_Specific(RHS))) &&
          match(L2, m_Load(m_Specific(LHS))));
}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic
/// load or a volatile load. This is debatable, and might be reasonable to
/// change later. However, it is risky in case some backend or other part of
/// LLVM is relying on the exact type loaded to select appropriate atomic
/// operations.
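///
/// For example (illustrative IR; names are hypothetical), a load whose only
/// use is a no-op bitcast of the loaded value:
///   %i = load i32, i32* %p
///   %f = bitcast i32 %i to float
/// is rewritten to load the destination type directly:
///   %p.cast = bitcast i32* %p to float*
///   %f = load float, float* %p.cast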
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
                                               LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic loads here but it isn't clear that this is important.
  if (!LI.isUnordered())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  // swifterror values can't be bitcasted.
  if (LI.getPointerOperand()->isSwiftError())
    return nullptr;

  const DataLayout &DL = IC.getDataLayout();

  // Fold away bit casts of the loaded value by loading the desired type.
  // Note that we should not do this for pointer<->integer casts,
  // because that would result in type punning.
  if (LI.hasOneUse())
    if (auto *CI = dyn_cast<CastInst>(LI.user_back()))
      if (CI->isNoopCast(DL) && LI.getType()->isPtrOrPtrVectorTy() ==
                                    CI->getDestTy()->isPtrOrPtrVectorTy())
        if (!LI.isAtomic() || isSupportedAtomicType(CI->getDestTy())) {
          LoadInst *NewLoad = IC.combineLoadToNewType(LI, CI->getDestTy());
          CI->replaceAllUsesWith(NewLoad);
          IC.eraseInstFromFunction(*CI);
          return &LI;
        }

  // FIXME: We should also canonicalize loads of vectors when their elements
  // are cast to other types.
  return nullptr;
}

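/// Unpack a load of an aggregate type into loads of its elements, rebuilt
/// with insertvalue. A sketch of the single-element struct case (illustrative
/// IR; names are hypothetical):
///   %s = load { i32 }, { i32 }* %p
/// becomes
///   %s.unpack = load i32, i32* %p.cast
///   %s = insertvalue { i32 } undef, i32 %s.unpack, 0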
static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // loads here but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  StringRef Name = LI.getName();
  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
    auto NumElements = ST->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
                                                  ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // We don't want to break up loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return nullptr;

    const auto Align = LI.getAlign();
    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt32Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    for (unsigned i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(
          ST->getElementType(i), Ptr,
          commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
      // Propagate AA metadata. It'll still be valid on the narrowed load.
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    auto *ET = AT->getElementType();
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      NewLoad->setAAMetadata(AAMD);
      return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, Name));
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return nullptr;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(ET);
    const auto Align = LI.getAlign();

    auto *Addr = LI.getPointerOperand();
    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    Value *V = UndefValue::get(T);
    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               Name + ".elt");
      auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
                                             commonAlignment(Align, Offset),
                                             Name + ".unpack");
      AAMDNodes AAMD;
      LI.getAAMetadata(AAMD);
      L->setAAMetadata(AAMD);
      V = IC.Builder.CreateInsertValue(V, L, i);
      Offset += EltSize;
    }

    V->setName(Name);
    return IC.replaceInstUsesWith(LI, V);
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant globals and allocas are examples of objects whose size we
// can determine).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->isInterposable())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
                                     GetElementPtrInst *GEPI, Instruction *MemI,
                                     unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy =
    GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), 0, MemI);
      if (Known.isNonNegative())
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
        ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
  if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
    return false;

  auto *Ptr = SI.getPointerOperand();
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
    Ptr = GEPI->getOperand(0);
  return (isa<ConstantPointerNull>(Ptr) &&
          !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    if (isa<ConstantPointerNull>(GEPI0) &&
        !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
      return true;
  }
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) &&
       !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
    return true;
  return false;
}

Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  Align KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT);
  if (KnownAlign > LI.getAlign())
    LI.setAlignment(KnownAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.push(NewGEPI);
    return &LI;
  }

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI(LI);
  bool IsLoadCSE = false;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          &LI, LI.getParent(), BBI, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    if (IsLoadCSE)
      combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);

    return replaceInstUsesWith(
        LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                           LI.getName() + ".cast"));
  }

  // None of the following transforms are legal for volatile/ordered atomic
  // loads.  Most of them do apply for unordered atomics.
  if (!LI.isUnordered()) return nullptr;

  // load(gep null, ...) -> unreachable
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for these xforms.
  if (canSimplifyNullLoadOrGEP(LI, Op)) {
    // Insert a new store to null instruction before the load to indicate
    // that this code is not reachable.  We do this instead of inserting
    // an unreachable instruction directly because we cannot modify the
    // CFG.
    StoreInst *SI = new StoreInst(UndefValue::get(LI.getType()),
                                  Constant::getNullValue(Op->getType()), &LI);
    SI->setDebugLoc(LI.getDebugLoc());
    return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, enables many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      Align Alignment = LI.getAlign();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                      Alignment, DL, SI) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
                                      Alignment, DL, SI)) {
        LoadInst *V1 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(1),
                               SI->getOperand(1)->getName() + ".val");
        LoadInst *V2 =
            Builder.CreateLoad(LI.getType(), SI->getOperand(2),
                               SI->getOperand(2)->getName() + ".val");
        assert(LI.isUnordered() && "implied by above");
        V1->setAlignment(Alignment);
        V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        V2->setAlignment(Alignment);
        V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(2));

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          !NullPointerIsDefined(SI->getFunction(),
                                LI.getPointerAddressSpace()))
        return replaceOperand(LI, 0, SI->getOperand(1));
    }
  }
  return nullptr;
}

/// Look for extractelement/insertvalue sequences that act like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///     %E0 = extractelement <2 x double> %U, i32 0
///     %V0 = insertvalue [2 x double] undef, double %E0, 0
///     %E1 = extractelement <2 x double> %U, i32 1
///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
  Value *U = nullptr;
  while (auto *IV = dyn_cast<InsertValueInst>(V)) {
    auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
    if (!E)
      return nullptr;
    auto *W = E->getVectorOperand();
    if (!U)
      U = W;
    else if (U != W)
      return nullptr;
    auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
      return nullptr;
    V = IV->getAggregateOperand();
  }
  if (!isa<UndefValue>(V) || !U)
    return nullptr;

  auto *UT = cast<VectorType>(U->getType());
  auto *VT = V->getType();
  // Check that types UT and VT are bitwise isomorphic.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
    return nullptr;
  }
  if (auto *AT = dyn_cast<ArrayType>(VT)) {
    if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
  } else {
    auto *ST = cast<StructType>(VT);
    if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
      return nullptr;
    for (const auto *EltT : ST->elements()) {
      if (EltT != UT->getElementType())
        return nullptr;
    }
  }
  return U;
}

/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller
/// erase the store instruction as otherwise there is no way to signal whether
/// it was combined or not: IC.eraseInstFromFunction returns a null pointer.
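///
/// For example (illustrative IR; names are hypothetical), a store of a
/// bitcast value:
///   %i = bitcast float %f to i32
///   store i32 %i, i32* %p
/// becomes a store of the original type:
///   %p.cast = bitcast i32* %p to float*
///   store float %f, float* %p.cast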
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and ordered
  // atomic stores here but it isn't clear that this is important.
  if (!SI.isUnordered())
    return false;

  // swifterror values can't be bitcasted.
  if (SI.getPointerOperand()->isSwiftError())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  if (Value *U = likeBitCastFromVector(IC, V))
    if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
      combineStoreToNewValue(IC, SI, U);
      return true;
    }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
  // FIXME: We could probably with some care handle both volatile and atomic
  // stores here but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack.
    unsigned Count = ST->getNumElements();
    if (Count == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // We don't want to break up stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
    const DataLayout &DL = IC.getDataLayout();
    auto *SL = DL.getStructLayout(ST);
    if (SL->hasPadding())
      return false;

    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt32Ty(ST->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);
    for (unsigned i = 0; i < Count; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, SL->getElementOffset(i));
      llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
    }

    return true;
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, we unpack.
    auto NumElements = AT->getNumElements();
    if (NumElements == 1) {
      V = IC.Builder.CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }

    // Bail out if the array is too large. Ideally we would like to optimize
    // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily, maybe needs a little bit of
    // tuning.
    if (NumElements > IC.MaxArraySizeForCombine)
      return false;

    const DataLayout &DL = IC.getDataLayout();
    auto EltSize = DL.getTypeAllocSize(AT->getElementType());
    const auto Align = SI.getAlign();

    SmallString<16> EltName = V->getName();
    EltName += ".elt";
    auto *Addr = SI.getPointerOperand();
    SmallString<16> AddrName = Addr->getName();
    AddrName += ".repack";

    auto *IdxType = Type::getInt64Ty(T->getContext());
    auto *Zero = ConstantInt::get(IdxType, 0);

    uint64_t Offset = 0;
    for (uint64_t i = 0; i < NumElements; i++) {
      Value *Indices[2] = {
        Zero,
        ConstantInt::get(IdxType, i),
      };
      auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                               AddrName);
      auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
      auto EltAlign = commonAlignment(Align, Offset);
      Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
      AAMDNodes AAMD;
      SI.getAAMetadata(AAMD);
      NS->setAAMetadata(AAMD);
      Offset += EltSize;
    }

    return true;
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Converts store (bitcast (load (bitcast (select ...)))) to
/// store (load (select ...)), where the select is a minmax:
/// select ((cmp load V1, load V2), V1, V2).
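///
/// A sketch of the shape being matched (illustrative IR; names and types are
/// hypothetical):
///   %sel = select i1 %cmp, float* %p1, float* %p2  ; minmax over two loads
///   %sel.i = bitcast float* %sel to i32*
///   %v = load i32, i32* %sel.i
///   %d.i = bitcast float* %d to i32*
///   store i32 %v, i32* %d.i
/// which is rewritten to load and store directly at the float type.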
static bool removeBitcastsFromLoadStoreOnMinMax(InstCombinerImpl &IC,
                                                StoreInst &SI) {
  // bitcast?
  if (!match(SI.getPointerOperand(), m_BitCast(m_Value())))
    return false;
  // load? integer?
  Value *LoadAddr;
  if (!match(SI.getValueOperand(), m_Load(m_BitCast(m_Value(LoadAddr)))))
    return false;
  auto *LI = cast<LoadInst>(SI.getValueOperand());
  if (!LI->getType()->isIntegerTy())
    return false;
  Type *CmpLoadTy;
  if (!isMinMaxWithLoads(LoadAddr, CmpLoadTy))
    return false;

  // Make sure the type would actually change.
  // This condition can be hit with chains of bitcasts.
  if (LI->getType() == CmpLoadTy)
    return false;

  // Make sure we're not changing the size of the load/store.
  const auto &DL = IC.getDataLayout();
  if (DL.getTypeStoreSizeInBits(LI->getType()) !=
      DL.getTypeStoreSizeInBits(CmpLoadTy))
    return false;

  if (!all_of(LI->users(), [LI, LoadAddr](User *U) {
        auto *SI = dyn_cast<StoreInst>(U);
        return SI && SI->getPointerOperand() != LI &&
               InstCombiner::peekThroughBitcast(SI->getPointerOperand()) !=
                   LoadAddr &&
               !SI->getPointerOperand()->isSwiftError();
      }))
    return false;

  IC.Builder.SetInsertPoint(LI);
  LoadInst *NewLI = IC.combineLoadToNewType(*LI, CmpLoadTy);
  // Replace all the stores with stores of the newly loaded value.
  for (auto *UI : LI->users()) {
    auto *USI = cast<StoreInst>(UI);
    IC.Builder.SetInsertPoint(USI);
    combineStoreToNewValue(IC, *USI, NewLI);
  }
  IC.replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
  IC.eraseInstFromFunction(*LI);
  return true;
}
1288 
1289 Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
1290   Value *Val = SI.getOperand(0);
1291   Value *Ptr = SI.getOperand(1);
1292 
1293   // Try to canonicalize the stored type.
1294   if (combineStoreToValueType(*this, SI))
1295     return eraseInstFromFunction(SI);
1296 
1297   // Attempt to improve the alignment.
1298   const Align KnownAlign = getOrEnforceKnownAlignment(
1299       Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT);
1300   if (KnownAlign > SI.getAlign())
1301     SI.setAlignment(KnownAlign);
1302 
1303   // Try to canonicalize the stored type.
1304   if (unpackStoreToAggregate(*this, SI))
1305     return eraseInstFromFunction(SI);
1306 
1307   if (removeBitcastsFromLoadStoreOnMinMax(*this, SI))
1308     return eraseInstFromFunction(SI);
1309 
1310   // Replace GEP indices if possible.
1311   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
1312       Worklist.push(NewGEPI);
1313       return &SI;
1314   }
1315 
1316   // Don't hack volatile/ordered stores.
1317   // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1318   if (!SI.isUnordered()) return nullptr;
1319 
1320   // If the RHS is an alloca with a single use, zapify the store, making the
1321   // alloca dead.
1322   if (Ptr->hasOneUse()) {
1323     if (isa<AllocaInst>(Ptr))
1324       return eraseInstFromFunction(SI);
1325     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1326       if (isa<AllocaInst>(GEP->getOperand(0))) {
1327         if (GEP->getOperand(0)->hasOneUse())
1328           return eraseInstFromFunction(SI);
1329       }
1330     }
1331   }
1332 
1333   // If we have a store to a location which is known constant, we can conclude
1334   // that the store must be storing the constant value (else the memory
1335   // wouldn't be constant), and this must be a noop.
1336   if (AA->pointsToConstantMemory(Ptr))
1337     return eraseInstFromFunction(SI);
1338 
1339   // Do really simple DSE, to catch cases where there are several consecutive
1340   // stores to the same location, separated by a few arithmetic operations. This
1341   // situation often occurs with bitfield accesses.
1342   BasicBlock::iterator BBI(SI);
1343   for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1344        --ScanInsts) {
1345     --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store is unordered and stores to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
        ++NumDeadStore;
        // Manually add back the original store to the worklist now, so it will
        // be processed after the operands of the removed store, as this may
        // expose additional DSE opportunities.
        Worklist.push(&SI);
        eraseInstFromFunction(*PrevSI);
        return nullptr;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value comes
    // from the pointer we are storing to and is the value being stored, then
    // *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
        assert(SI.isUnordered() && "can't eliminate ordering operation");
        return eraseInstFromFunction(SI);
      }

      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads, throws or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
  if (canSimplifyNullStoreOrGEP(SI)) {
    if (!isa<UndefValue>(Val))
      return replaceOperand(SI, 0, UndefValue::get(Val->getType()));
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return eraseInstFromFunction(SI);

  return nullptr;
}

/// Try to transform:
///   if () { *P = v1; } else { *P = v2; }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
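///
/// A rough sketch of the resulting IR for the if/then/else form, using
/// illustrative block names:
///   DestBB:
///     %storemerge = phi i32 [ %v1, %ThenBB ], [ %v2, %ElseBB ]
///     store i32 %storemerge, i32* %P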
bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
  if (!SI.isUnordered())
    return false; // This code has not been audited for volatile/ordered case.

  // Check if the successor block has exactly 2 incoming edges.
  BasicBlock *StoreBB = SI.getParent();
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
  if (!DestBB->hasNPredecessors(2))
    return false;

  // Capture the other block (the block that doesn't contain our store).
  pred_iterator PredIter = pred_begin(DestBB);
  if (*PredIter == StoreBB)
    ++PredIter;
  BasicBlock *OtherBB = *PredIter;

  // Bail out if the relevant blocks aren't all distinct. This can happen,
  // for example, if SI is in an infinite loop.
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case. There is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of
    // the destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to DestBB and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
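    // A rough sketch of the triangle being matched, with illustrative names:
    //   OtherBB:  store v1, P
    //             br i1 %cond, label %StoreBB, label %DestBB
    //   StoreBB:  store v2, P       ; SI
    //             br label %DestBB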
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the transform.
      if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
          BBI->mayWriteToMemory() || BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  // The debug locations of the original instructions might differ. Merge them.
  DebugLoc MergedLoc = DILocation::getMergedLocation(SI.getDebugLoc(),
                                                     OtherStore->getDebugLoc());
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
    PN->setDebugLoc(MergedLoc);
  }

  // Advance to a place where it is safe to insert the new store and insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI =
      new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
                    SI.getOrdering(), SI.getSyncScopeID());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(MergedLoc);

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  eraseInstFromFunction(SI);
  eraseInstFromFunction(*OtherStore);
  return true;
}