1 //===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visit functions for load, store and alloca.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/Loads.h"
18 #include "llvm/IR/DataLayout.h"
19 #include "llvm/IR/LLVMContext.h"
20 #include "llvm/IR/IntrinsicInst.h"
21 #include "llvm/IR/MDBuilder.h"
22 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
23 #include "llvm/Transforms/Utils/Local.h"
24 using namespace llvm;
25 
26 #define DEBUG_TYPE "instcombine"
27 
28 STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
29 STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
30 
31 /// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
32 /// some part of a constant global variable.  This intentionally only accepts
33 /// constant expressions because we can't rewrite arbitrary instructions.
34 static bool pointsToConstantGlobal(Value *V) {
35   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
36     return GV->isConstant();
37 
38   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
39     if (CE->getOpcode() == Instruction::BitCast ||
40         CE->getOpcode() == Instruction::AddrSpaceCast ||
41         CE->getOpcode() == Instruction::GetElementPtr)
42       return pointsToConstantGlobal(CE->getOperand(0));
43   }
44   return false;
45 }
46 
47 /// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer; return false if we
49 /// see any stores or other unknown uses.  If we see pointer arithmetic, keep
50 /// track of whether it moves the pointer (with IsOffset) but otherwise traverse
51 /// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
52 /// the alloca, and if the source pointer is a pointer to a constant global, we
53 /// can optimize this.
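///
/// For illustration only (the IR below is schematic and all names are made
/// up), the pattern being matched looks roughly like:
///
///   @G = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   ...
///   %A = alloca [4 x i32]
///   %A.i8 = bitcast [4 x i32]* %A to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %A.i8,
///       i8* bitcast ([4 x i32]* @G to i8*), i64 16, i32 4, i1 false)
///   %elt = getelementptr inbounds [4 x i32], [4 x i32]* %A, i64 0, i64 2
///   %v = load i32, i32* %elt
///
/// Every use of %A other than the memcpy is a read, so the caller may replace
/// %A with @G and delete the copy.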
54 static bool
55 isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
56                                SmallVectorImpl<Instruction *> &ToDelete) {
57   // We track lifetime intrinsics as we encounter them.  If we decide to go
58   // ahead and replace the value with the global, this lets the caller quickly
59   // eliminate the markers.
60 
61   SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
62   ValuesToInspect.emplace_back(V, false);
63   while (!ValuesToInspect.empty()) {
64     auto ValuePair = ValuesToInspect.pop_back_val();
65     const bool IsOffset = ValuePair.second;
66     for (auto &U : ValuePair.first->uses()) {
67       auto *I = cast<Instruction>(U.getUser());
68 
69       if (auto *LI = dyn_cast<LoadInst>(I)) {
        // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
71         if (!LI->isSimple()) return false;
72         continue;
73       }
74 
75       if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
76         // If uses of the bitcast are ok, we are ok.
77         ValuesToInspect.emplace_back(I, IsOffset);
78         continue;
79       }
80       if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise, it does.
83         ValuesToInspect.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
84         continue;
85       }
86 
87       if (auto CS = CallSite(I)) {
88         // If this is the function being called then we treat it like a load and
89         // ignore it.
90         if (CS.isCallee(&U))
91           continue;
92 
93         unsigned DataOpNo = CS.getDataOperandNo(&U);
94         bool IsArgOperand = CS.isArgOperand(&U);
95 
96         // Inalloca arguments are clobbered by the call.
97         if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
98           return false;
99 
100         // If this is a readonly/readnone call site, then we know it is just a
101         // load (but one that potentially returns the value itself), so we can
102         // ignore it if we know that the value isn't captured.
103         if (CS.onlyReadsMemory() &&
104             (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
105           continue;
106 
107         // If this is being passed as a byval argument, the caller is making a
108         // copy, so it is only a read of the alloca.
109         if (IsArgOperand && CS.isByValArgument(DataOpNo))
110           continue;
111       }
112 
113       // Lifetime intrinsics can be handled by the caller.
114       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
115         if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
116             II->getIntrinsicID() == Intrinsic::lifetime_end) {
117           assert(II->use_empty() && "Lifetime markers have no result to use!");
118           ToDelete.push_back(II);
119           continue;
120         }
121       }
122 
      // If this isn't our memcpy/memmove, reject it as something we can't
124       // handle.
125       MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
126       if (!MI)
127         return false;
128 
129       // If the transfer is using the alloca as a source of the transfer, then
130       // ignore it since it is a load (unless the transfer is volatile).
131       if (U.getOperandNo() == 1) {
132         if (MI->isVolatile()) return false;
133         continue;
134       }
135 
136       // If we already have seen a copy, reject the second one.
137       if (TheCopy) return false;
138 
139       // If the pointer has been offset from the start of the alloca, we can't
140       // safely handle this.
141       if (IsOffset) return false;
142 
143       // If the memintrinsic isn't using the alloca as the dest, reject it.
144       if (U.getOperandNo() != 0) return false;
145 
146       // If the source of the memcpy/move is not a constant global, reject it.
147       if (!pointsToConstantGlobal(MI->getSource()))
148         return false;
149 
150       // Otherwise, the transform is safe.  Remember the copy instruction.
151       TheCopy = MI;
152     }
153   }
154   return true;
155 }
156 
157 /// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
158 /// modified by a copy from a constant global.  If we can prove this, we can
159 /// replace any uses of the alloca with uses of the global directly.
160 static MemTransferInst *
161 isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
162                                SmallVectorImpl<Instruction *> &ToDelete) {
163   MemTransferInst *TheCopy = nullptr;
164   if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
165     return TheCopy;
166   return nullptr;
167 }
168 
169 static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
170   // Check for array size of 1 (scalar allocation).
171   if (!AI.isArrayAllocation()) {
172     // i32 1 is the canonical array size for scalar allocations.
173     if (AI.getArraySize()->getType()->isIntegerTy(32))
174       return nullptr;
175 
176     // Canonicalize it.
177     Value *V = IC.Builder->getInt32(1);
178     AI.setOperand(0, V);
179     return &AI;
180   }
181 
182   // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
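  //
  // For illustration only (names are made up), this turns
  //   %a = alloca i32, i32 8, align 4
  // into
  //   %a1 = alloca [8 x i32], align 4
  //   %a1.sub = getelementptr inbounds [8 x i32], [8 x i32]* %a1, i64 0, i64 0
  // and rewrites all uses of %a to use %a1.sub.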
183   if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
184     Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
185     AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
186     New->setAlignment(AI.getAlignment());
187 
188     // Scan to the end of the allocation instructions, to skip over a block of
189     // allocas if possible...also skip interleaved debug info
190     //
191     BasicBlock::iterator It(New);
192     while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
193       ++It;
194 
    // Now that It is pointing to the first non-allocation-inst in the block,
196     // insert our getelementptr instruction...
197     //
198     Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
199     Value *NullIdx = Constant::getNullValue(IdxTy);
200     Value *Idx[2] = {NullIdx, NullIdx};
201     Instruction *GEP =
202         GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
203     IC.InsertNewInstBefore(GEP, *It);
204 
205     // Now make everything use the getelementptr instead of the original
206     // allocation.
207     return IC.replaceInstUsesWith(AI, GEP);
208   }
209 
210   if (isa<UndefValue>(AI.getArraySize()))
211     return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
212 
213   // Ensure that the alloca array size argument has type intptr_t, so that
214   // any casting is exposed early.
215   Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
216   if (AI.getArraySize()->getType() != IntPtrTy) {
217     Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
218     AI.setOperand(0, V);
219     return &AI;
220   }
221 
222   return nullptr;
223 }
224 
225 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
226   if (auto *I = simplifyAllocaArraySize(*this, AI))
227     return I;
228 
229   if (AI.getAllocatedType()->isSized()) {
230     // If the alignment is 0 (unspecified), assign it the preferred alignment.
231     if (AI.getAlignment() == 0)
232       AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
233 
    // Move all allocas of zero byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
236     // allocate and return a unique pointer, even for a zero byte allocation.
237     if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
238       // For a zero sized alloca there is no point in doing an array allocation.
239       // This is helpful if the array size is a complicated expression not used
240       // elsewhere.
241       if (AI.isArrayAllocation()) {
242         AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
243         return &AI;
244       }
245 
246       // Get the first instruction in the entry block.
247       BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
248       Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
249       if (FirstInst != &AI) {
250         // If the entry block doesn't start with a zero-size alloca then move
251         // this one to the start of the entry block.  There is no problem with
        // dominance as the array size was forced to a constant earlier.
253         AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
254         if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
255             DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
256           AI.moveBefore(FirstInst);
257           return &AI;
258         }
259 
260         // If the alignment of the entry block alloca is 0 (unspecified),
261         // assign it the preferred alignment.
262         if (EntryAI->getAlignment() == 0)
263           EntryAI->setAlignment(
264               DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
265         // Replace this zero-sized alloca with the one at the start of the entry
266         // block after ensuring that the address will be aligned enough for both
267         // types.
268         unsigned MaxAlign = std::max(EntryAI->getAlignment(),
269                                      AI.getAlignment());
270         EntryAI->setAlignment(MaxAlign);
271         if (AI.getType() != EntryAI->getType())
272           return new BitCastInst(EntryAI, AI.getType());
273         return replaceInstUsesWith(AI, EntryAI);
274       }
275     }
276   }
277 
278   if (AI.getAlignment()) {
279     // Check to see if this allocation is only modified by a memcpy/memmove from
280     // a constant global whose alignment is equal to or exceeds that of the
281     // allocation.  If this is the case, we can change all users to use
282     // the constant global instead.  This is commonly produced by the CFE by
283     // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
284     // is only subsequently read.
285     SmallVector<Instruction *, 4> ToDelete;
286     if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
287       unsigned SourceAlign = getOrEnforceKnownAlignment(
288           Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
289       if (AI.getAlignment() <= SourceAlign) {
290         DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
291         DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
292         for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
293           eraseInstFromFunction(*ToDelete[i]);
294         Constant *TheSrc = cast<Constant>(Copy->getSource());
295         Constant *Cast
296           = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
297         Instruction *NewI = replaceInstUsesWith(AI, Cast);
298         eraseInstFromFunction(*Copy);
299         ++NumGlobalCopies;
300         return NewI;
301       }
302     }
303   }
304 
305   // At last, use the generic allocation site handler to aggressively remove
306   // unused allocas.
307   return visitAllocSite(AI);
308 }
309 
310 /// \brief Helper to combine a load to a new type.
311 ///
312 /// This just does the work of combining a load to a new type. It handles
313 /// metadata, etc., and returns the new instruction. The \c NewTy should be the
314 /// loaded *value* type. This will convert it to a pointer, cast the operand to
315 /// that pointer type, load it, etc.
316 ///
317 /// Note that this will create all of the instructions with whatever insert
318 /// point the \c InstCombiner currently is using.
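///
/// For illustration only, combining a load of i32 into a load of float yields
/// roughly:
///
///     %p.cast = bitcast i32* %p to float*
///     %v = load float, float* %p.cast
///
/// with the original load's alignment, ordering and applicable metadata
/// carried over to the new load.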
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
321   Value *Ptr = LI.getPointerOperand();
322   unsigned AS = LI.getPointerAddressSpace();
323   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
324   LI.getAllMetadata(MD);
325 
326   LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
327       IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
328       LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
329   NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
330   MDBuilder MDB(NewLoad->getContext());
331   for (const auto &MDPair : MD) {
332     unsigned ID = MDPair.first;
333     MDNode *N = MDPair.second;
334     // Note, essentially every kind of metadata should be preserved here! This
335     // routine is supposed to clone a load instruction changing *only its type*.
336     // The only metadata it makes sense to drop is metadata which is invalidated
337     // when the pointer type changes. This should essentially never be the case
338     // in LLVM, but we explicitly switch over only known metadata to be
339     // conservatively correct. If you are adding metadata to LLVM which pertains
340     // to loads, you almost certainly want to add it here.
341     switch (ID) {
342     case LLVMContext::MD_dbg:
343     case LLVMContext::MD_tbaa:
344     case LLVMContext::MD_prof:
345     case LLVMContext::MD_fpmath:
346     case LLVMContext::MD_tbaa_struct:
347     case LLVMContext::MD_invariant_load:
348     case LLVMContext::MD_alias_scope:
349     case LLVMContext::MD_noalias:
350     case LLVMContext::MD_nontemporal:
351     case LLVMContext::MD_mem_parallel_loop_access:
352       // All of these directly apply.
353       NewLoad->setMetadata(ID, N);
354       break;
355 
356     case LLVMContext::MD_nonnull:
357       // This only directly applies if the new type is also a pointer.
358       if (NewTy->isPointerTy()) {
359         NewLoad->setMetadata(ID, N);
360         break;
361       }
362       // If it's integral now, translate it to !range metadata.
363       if (NewTy->isIntegerTy()) {
364         auto *ITy = cast<IntegerType>(NewTy);
365         auto *NullInt = ConstantExpr::getPtrToInt(
366             ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
367         auto *NonNullInt =
368             ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
369         NewLoad->setMetadata(LLVMContext::MD_range,
370                              MDB.createRange(NonNullInt, NullInt));
371       }
372       break;
373     case LLVMContext::MD_align:
374     case LLVMContext::MD_dereferenceable:
375     case LLVMContext::MD_dereferenceable_or_null:
376       // These only directly apply if the new type is also a pointer.
377       if (NewTy->isPointerTy())
378         NewLoad->setMetadata(ID, N);
379       break;
380     case LLVMContext::MD_range:
381       // FIXME: It would be nice to propagate this in some way, but the type
382       // conversions make it hard.
383 
384       // If it's a pointer now and the range does not contain 0, make it !nonnull.
385       if (NewTy->isPointerTy()) {
386         unsigned BitWidth = IC.getDataLayout().getTypeSizeInBits(NewTy);
387         if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
388           MDNode *NN = MDNode::get(LI.getContext(), None);
389           NewLoad->setMetadata(LLVMContext::MD_nonnull, NN);
390         }
391       }
392       break;
393     }
394   }
395   return NewLoad;
396 }
397 
398 /// \brief Combine a store to a new type.
399 ///
400 /// Returns the newly created store instruction.
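///
/// For illustration only, storing an i32 value through a pointer previously
/// typed as float* yields roughly:
///
///     %p.cast = bitcast float* %p to i32*
///     store i32 %v, i32* %p.cast
///
/// with the original store's alignment, ordering and applicable metadata
/// carried over.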
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
402   Value *Ptr = SI.getPointerOperand();
403   unsigned AS = SI.getPointerAddressSpace();
404   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
405   SI.getAllMetadata(MD);
406 
407   StoreInst *NewStore = IC.Builder->CreateAlignedStore(
408       V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
409       SI.getAlignment(), SI.isVolatile());
410   NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
411   for (const auto &MDPair : MD) {
412     unsigned ID = MDPair.first;
413     MDNode *N = MDPair.second;
414     // Note, essentially every kind of metadata should be preserved here! This
415     // routine is supposed to clone a store instruction changing *only its
416     // type*. The only metadata it makes sense to drop is metadata which is
417     // invalidated when the pointer type changes. This should essentially
418     // never be the case in LLVM, but we explicitly switch over only known
419     // metadata to be conservatively correct. If you are adding metadata to
420     // LLVM which pertains to stores, you almost certainly want to add it
421     // here.
422     switch (ID) {
423     case LLVMContext::MD_dbg:
424     case LLVMContext::MD_tbaa:
425     case LLVMContext::MD_prof:
426     case LLVMContext::MD_fpmath:
427     case LLVMContext::MD_tbaa_struct:
428     case LLVMContext::MD_alias_scope:
429     case LLVMContext::MD_noalias:
430     case LLVMContext::MD_nontemporal:
431     case LLVMContext::MD_mem_parallel_loop_access:
432       // All of these directly apply.
433       NewStore->setMetadata(ID, N);
434       break;
435 
436     case LLVMContext::MD_invariant_load:
437     case LLVMContext::MD_nonnull:
438     case LLVMContext::MD_range:
439     case LLVMContext::MD_align:
440     case LLVMContext::MD_dereferenceable:
441     case LLVMContext::MD_dereferenceable_or_null:
442       // These don't apply for stores.
443       break;
444     }
445   }
446 
447   return NewStore;
448 }
449 
450 /// \brief Combine loads to match the type of their uses' value after looking
451 /// through intervening bitcasts.
452 ///
453 /// The core idea here is that if the result of a load is used in an operation,
454 /// we should load the type most conducive to that operation. For example, when
455 /// loading an integer and converting that immediately to a pointer, we should
456 /// instead directly load a pointer.
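///
/// For illustration only (assuming 64-bit pointers, so the cast is a no-op):
///
///     %x = load i64, i64* %p
///     %q = inttoptr i64 %x to i8*
///
/// is better modeled by loading the pointer type directly:
///
///     %p.cast = bitcast i64* %p to i8**
///     %q = load i8*, i8** %p.cast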
457 ///
458 /// However, this routine must never change the width of a load or the number of
459 /// loads as that would introduce a semantic change. This combine is expected to
460 /// be a semantic no-op which just allows loads to more closely model the types
461 /// of their consuming operations.
462 ///
463 /// Currently, we also refuse to change the precise type used for an atomic load
464 /// or a volatile load. This is debatable, and might be reasonable to change
465 /// later. However, it is risky in case some backend or other part of LLVM is
466 /// relying on the exact type loaded to select appropriate atomic operations.
467 static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably handle both volatile and ordered atomic loads
  // here with some care, but it isn't clear that this is important.
470   if (!LI.isUnordered())
471     return nullptr;
472 
473   if (LI.use_empty())
474     return nullptr;
475 
476   // swifterror values can't be bitcasted.
477   if (LI.getPointerOperand()->isSwiftError())
478     return nullptr;
479 
480   Type *Ty = LI.getType();
481   const DataLayout &DL = IC.getDataLayout();
482 
483   // Try to canonicalize loads which are only ever stored to operate over
484   // integers instead of any other type. We only do this when the loaded type
485   // is sized and has a size exactly the same as its store size and the store
486   // size is a legal integer type.
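  //
  // For illustration only: if %f below is used only by stores,
  //   %f = load float, float* %p
  //   store float %f, float* %q
  // is rewritten (through pointer bitcasts) into
  //   %f1 = load i32, i32* %p.cast
  //   store i32 %f1, i32* %q.cast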
487   if (!Ty->isIntegerTy() && Ty->isSized() &&
488       DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
489       DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty) &&
490       !DL.isNonIntegralPointerType(Ty)) {
491     if (all_of(LI.users(), [&LI](User *U) {
492           auto *SI = dyn_cast<StoreInst>(U);
493           return SI && SI->getPointerOperand() != &LI;
494         })) {
495       LoadInst *NewLoad = combineLoadToNewType(
496           IC, LI,
497           Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
498       // Replace all the stores with stores of the newly loaded value.
499       for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
500         auto *SI = cast<StoreInst>(*UI++);
501         IC.Builder->SetInsertPoint(SI);
502         combineStoreToNewValue(IC, *SI, NewLoad);
503         IC.eraseInstFromFunction(*SI);
504       }
505       assert(LI.use_empty() && "Failed to remove all users of the load!");
506       // Return the old load so the combiner can delete it safely.
507       return &LI;
508     }
509   }
510 
511   // Fold away bit casts of the loaded value by loading the desired type.
512   // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type has the same
514   // bitwidth as the target's pointers).
515   if (LI.hasOneUse())
516     if (auto* CI = dyn_cast<CastInst>(LI.user_back())) {
517       if (CI->isNoopCast(DL)) {
518         LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
519         CI->replaceAllUsesWith(NewLoad);
520         IC.eraseInstFromFunction(*CI);
521         return &LI;
522       }
523     }
524 
525   // FIXME: We should also canonicalize loads of vectors when their elements are
526   // cast to other types.
527   return nullptr;
528 }
529 
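// Unpack a load of an aggregate into loads of its elements, re-assembled with
// insertvalue instructions. For illustration only, given a padding-free
// %st = type { i32, i32 }, the load
//   %s = load %st, %st* %p
// becomes roughly
//   %p0 = getelementptr inbounds %st, %st* %p, i32 0, i32 0
//   %e0 = load i32, i32* %p0
//   %a0 = insertvalue %st undef, i32 %e0, 0
//   %p1 = getelementptr inbounds %st, %st* %p, i32 0, i32 1
//   %e1 = load i32, i32* %p1
//   %s2 = insertvalue %st %a0, i32 %e1, 1
// and %s2 replaces all uses of %s.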
530 static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably handle both volatile and atomic loads here with
  // some care, but it isn't clear that this is important.
533   if (!LI.isSimple())
534     return nullptr;
535 
536   Type *T = LI.getType();
537   if (!T->isAggregateType())
538     return nullptr;
539 
540   StringRef Name = LI.getName();
541   assert(LI.getAlignment() && "Alignment must be set at this point");
542 
543   if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
545     auto NumElements = ST->getNumElements();
546     if (NumElements == 1) {
547       LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
548                                                ".unpack");
549       return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
550         UndefValue::get(T), NewLoad, 0, Name));
551     }
552 
    // We don't want to break loads with padding here as we'd lose
554     // the knowledge that padding exists for the rest of the pipeline.
555     const DataLayout &DL = IC.getDataLayout();
556     auto *SL = DL.getStructLayout(ST);
557     if (SL->hasPadding())
558       return nullptr;
559 
560     auto Align = LI.getAlignment();
561     if (!Align)
562       Align = DL.getABITypeAlignment(ST);
563 
564     auto *Addr = LI.getPointerOperand();
565     auto *IdxType = Type::getInt32Ty(T->getContext());
566     auto *Zero = ConstantInt::get(IdxType, 0);
567 
568     Value *V = UndefValue::get(T);
569     for (unsigned i = 0; i < NumElements; i++) {
570       Value *Indices[2] = {
571         Zero,
572         ConstantInt::get(IdxType, i),
573       };
574       auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
575                                                 Name + ".elt");
576       auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
577       auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
578       V = IC.Builder->CreateInsertValue(V, L, i);
579     }
580 
581     V->setName(Name);
582     return IC.replaceInstUsesWith(LI, V);
583   }
584 
585   if (auto *AT = dyn_cast<ArrayType>(T)) {
586     auto *ET = AT->getElementType();
587     auto NumElements = AT->getNumElements();
588     if (NumElements == 1) {
589       LoadInst *NewLoad = combineLoadToNewType(IC, LI, ET, ".unpack");
590       return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
591         UndefValue::get(T), NewLoad, 0, Name));
592     }
593 
594     // Bail out if the array is too large. Ideally we would like to optimize
595     // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily and may need a little bit of
    // tuning.
598     if (NumElements > 1024)
599       return nullptr;
600 
601     const DataLayout &DL = IC.getDataLayout();
602     auto EltSize = DL.getTypeAllocSize(ET);
603     auto Align = LI.getAlignment();
604     if (!Align)
605       Align = DL.getABITypeAlignment(T);
606 
607     auto *Addr = LI.getPointerOperand();
608     auto *IdxType = Type::getInt64Ty(T->getContext());
609     auto *Zero = ConstantInt::get(IdxType, 0);
610 
611     Value *V = UndefValue::get(T);
612     uint64_t Offset = 0;
613     for (uint64_t i = 0; i < NumElements; i++) {
614       Value *Indices[2] = {
615         Zero,
616         ConstantInt::get(IdxType, i),
617       };
618       auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
619                                                 Name + ".elt");
620       auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
621                                               Name + ".unpack");
622       V = IC.Builder->CreateInsertValue(V, L, i);
623       Offset += EltSize;
624     }
625 
626     V->setName(Name);
627     return IC.replaceInstUsesWith(LI, V);
628   }
629 
630   return nullptr;
631 }
632 
// If we can determine that all possible objects pointed to by the provided
// pointer value are not only dereferenceable but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. Constant global values and allocas are examples of objects whose
// size can be determined this way.
637 //
638 // FIXME: This should probably live in ValueTracking (or similar).
639 static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
640                                      const DataLayout &DL) {
641   SmallPtrSet<Value *, 4> Visited;
642   SmallVector<Value *, 4> Worklist(1, V);
643 
644   do {
645     Value *P = Worklist.pop_back_val();
646     P = P->stripPointerCasts();
647 
648     if (!Visited.insert(P).second)
649       continue;
650 
651     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
652       Worklist.push_back(SI->getTrueValue());
653       Worklist.push_back(SI->getFalseValue());
654       continue;
655     }
656 
657     if (PHINode *PN = dyn_cast<PHINode>(P)) {
658       for (Value *IncValue : PN->incoming_values())
659         Worklist.push_back(IncValue);
660       continue;
661     }
662 
663     if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
664       if (GA->isInterposable())
665         return false;
666       Worklist.push_back(GA->getAliasee());
667       continue;
668     }
669 
670     // If we know how big this object is, and it is less than MaxSize, continue
671     // searching. Otherwise, return false.
672     if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
673       if (!AI->getAllocatedType()->isSized())
674         return false;
675 
676       ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
677       if (!CS)
678         return false;
679 
680       uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
682       // uint64_t, we still do the right thing.
683       if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
684         return false;
685       continue;
686     }
687 
688     if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
689       if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
690         return false;
691 
692       uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
693       if (InitSize > MaxSize)
694         return false;
695       continue;
696     }
697 
698     return false;
699   } while (!Worklist.empty());
700 
701   return true;
702 }
703 
704 // If we're indexing into an object of a known size, and the outer index is
705 // not a constant, but having any value but zero would lead to undefined
706 // behavior, replace it with zero.
707 //
708 // For example, if we have:
709 // @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
710 // ...
711 // %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
712 // ... = load i32* %arrayidx, align 4
713 // Then we know that we can replace %x in the GEP with i64 0.
714 //
715 // FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
719 static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
720                                      Instruction *MemI, unsigned &Idx) {
721   if (GEPI->getNumOperands() < 2)
722     return false;
723 
724   // Find the first non-zero index of a GEP. If all indices are zero, return
725   // one past the last index.
726   auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
727     unsigned I = 1;
728     for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
729       Value *V = GEPI->getOperand(I);
730       if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
731         if (CI->isZero())
732           continue;
733 
734       break;
735     }
736 
737     return I;
738   };
739 
740   // Skip through initial 'zero' indices, and find the corresponding pointer
741   // type. See if the next index is not a constant.
742   Idx = FirstNZIdx(GEPI);
743   if (Idx == GEPI->getNumOperands())
744     return false;
745   if (isa<Constant>(GEPI->getOperand(Idx)))
746     return false;
747 
748   SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
749   Type *AllocTy =
750     GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
751   if (!AllocTy || !AllocTy->isSized())
752     return false;
753   const DataLayout &DL = IC.getDataLayout();
754   uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
755 
756   // If there are more indices after the one we might replace with a zero, make
757   // sure they're all non-negative. If any of them are negative, the overall
758   // address being computed might be before the base address determined by the
759   // first non-zero index.
760   auto IsAllNonNegative = [&]() {
761     for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
762       bool KnownNonNegative, KnownNegative;
763       IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
764                         KnownNegative, 0, MemI);
765       if (KnownNonNegative)
766         continue;
767       return false;
768     }
769 
770     return true;
771   };
772 
773   // FIXME: If the GEP is not inbounds, and there are extra indices after the
774   // one we'll replace, those could cause the address computation to wrap
775   // (rendering the IsAllNonNegative() check below insufficient). We can do
776   // better, ignoring zero indices (and other indices we can prove small
777   // enough not to wrap).
778   if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
779     return false;
780 
781   // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
782   // also known to be dereferenceable.
783   return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
784          IsAllNonNegative();
785 }
786 
787 // If we're indexing into an object with a variable index for the memory
788 // access, but the object has only one element, we can assume that the index
789 // will always be zero. If we replace the GEP, return it.
790 template <typename T>
791 static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
792                                           T &MemI) {
793   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
794     unsigned Idx;
795     if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
796       Instruction *NewGEPI = GEPI->clone();
797       NewGEPI->setOperand(Idx,
798         ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
799       NewGEPI->insertBefore(GEPI);
800       MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
801       return NewGEPI;
802     }
803   }
804 
805   return nullptr;
806 }
807 
808 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
809   Value *Op = LI.getOperand(0);
810 
811   // Try to canonicalize the loaded type.
812   if (Instruction *Res = combineLoadToOperationType(*this, LI))
813     return Res;
814 
815   // Attempt to improve the alignment.
816   unsigned KnownAlign = getOrEnforceKnownAlignment(
817       Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
818   unsigned LoadAlign = LI.getAlignment();
819   unsigned EffectiveLoadAlign =
820       LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
821 
822   if (KnownAlign > EffectiveLoadAlign)
823     LI.setAlignment(KnownAlign);
824   else if (LoadAlign == 0)
825     LI.setAlignment(EffectiveLoadAlign);
826 
827   // Replace GEP indices if possible.
828   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
829       Worklist.Add(NewGEPI);
830       return &LI;
831   }
832 
833   if (Instruction *Res = unpackLoadToAggregate(*this, LI))
834     return Res;
835 
836   // Do really simple store-to-load forwarding and load CSE, to catch cases
837   // where there are several consecutive memory accesses to the same location,
838   // separated by a few arithmetic operations.
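  //
  // For illustration only, in
  //   store i32 %v, i32* %p
  //   %x = add i32 %v, 1        ; no intervening writes to %p
  //   %r = load i32, i32* %p
  // the load can be replaced by %v; two identical loads in the same situation
  // can likewise be CSE'd.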
839   BasicBlock::iterator BBI(LI);
840   bool IsLoadCSE = false;
841   if (Value *AvailableVal =
842       FindAvailableLoadedValue(&LI, LI.getParent(), BBI,
843                                DefMaxInstsToScan, AA, &IsLoadCSE)) {
844     if (IsLoadCSE) {
845       LoadInst *NLI = cast<LoadInst>(AvailableVal);
846       unsigned KnownIDs[] = {
847           LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
848           LLVMContext::MD_noalias,         LLVMContext::MD_range,
849           LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
850           LLVMContext::MD_invariant_group, LLVMContext::MD_align,
851           LLVMContext::MD_dereferenceable,
852           LLVMContext::MD_dereferenceable_or_null};
853       combineMetadata(NLI, &LI, KnownIDs);
    }
855 
856     return replaceInstUsesWith(
857         LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
858                                             LI.getName() + ".cast"));
859   }
860 
861   // None of the following transforms are legal for volatile/ordered atomic
862   // loads.  Most of them do apply for unordered atomics.
863   if (!LI.isUnordered()) return nullptr;
864 
865   // load(gep null, ...) -> unreachable
866   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
867     const Value *GEPI0 = GEPI->getOperand(0);
868     // TODO: Consider a target hook for valid address spaces for this xform.
869     if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
870       // Insert a new store to null instruction before the load to indicate
871       // that this code is not reachable.  We do this instead of inserting
872       // an unreachable instruction directly because we cannot modify the
873       // CFG.
874       new StoreInst(UndefValue::get(LI.getType()),
875                     Constant::getNullValue(Op->getType()), &LI);
876       return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
877     }
878   }
879 
880   // load null/undef -> unreachable
881   // TODO: Consider a target hook for valid address spaces for this xform.
882   if (isa<UndefValue>(Op) ||
883       (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
884     // Insert a new store to null instruction before the load to indicate that
885     // this code is not reachable.  We do this instead of inserting an
886     // unreachable instruction directly because we cannot modify the CFG.
887     new StoreInst(UndefValue::get(LI.getType()),
888                   Constant::getNullValue(Op->getType()), &LI);
889     return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
890   }
891 
892   if (Op->hasOneUse()) {
893     // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
895     // exposes redundancy in the code.
896     //
897     // Note that we cannot do the transformation unless we know that the
898     // introduced loads cannot trap!  Something like this is valid as long as
899     // the condition is always false: load (select bool %C, int* null, int* %G),
900     // but it would not be valid if we transformed it to load from null
901     // unconditionally.
902     //
903     if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
904       // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
905       unsigned Align = LI.getAlignment();
906       if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
907           isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
908         LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
909                                            SI->getOperand(1)->getName()+".val");
910         LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
911                                            SI->getOperand(2)->getName()+".val");
912         assert(LI.isUnordered() && "implied by above");
913         V1->setAlignment(Align);
914         V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
915         V2->setAlignment(Align);
916         V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
917         return SelectInst::Create(SI->getCondition(), V1, V2);
918       }
919 
920       // load (select (cond, null, P)) -> load P
921       if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
922           LI.getPointerAddressSpace() == 0) {
923         LI.setOperand(0, SI->getOperand(2));
924         return &LI;
925       }
926 
927       // load (select (cond, P, null)) -> load P
928       if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
929           LI.getPointerAddressSpace() == 0) {
930         LI.setOperand(0, SI->getOperand(1));
931         return &LI;
932       }
933     }
934   }
935   return nullptr;
936 }
937 
938 /// \brief Look for extractelement/insertvalue sequence that acts like a bitcast.
939 ///
940 /// \returns underlying value that was "cast", or nullptr otherwise.
941 ///
942 /// For example, if we have:
943 ///
944 ///     %E0 = extractelement <2 x double> %U, i32 0
945 ///     %V0 = insertvalue [2 x double] undef, double %E0, 0
946 ///     %E1 = extractelement <2 x double> %U, i32 1
947 ///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
948 ///
949 /// and the layout of a <2 x double> is isomorphic to a [2 x double],
950 /// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
951 /// Note that %U may contain non-undef values where %V1 has undef.
952 static Value *likeBitCastFromVector(InstCombiner &IC, Value *V) {
953   Value *U = nullptr;
954   while (auto *IV = dyn_cast<InsertValueInst>(V)) {
955     auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
956     if (!E)
957       return nullptr;
958     auto *W = E->getVectorOperand();
959     if (!U)
960       U = W;
961     else if (U != W)
962       return nullptr;
963     auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
    if (!CI || IV->getNumIndices() != 1 ||
        CI->getZExtValue() != *IV->idx_begin())
965       return nullptr;
966     V = IV->getAggregateOperand();
967   }
  if (!isa<UndefValue>(V) || !U)
969     return nullptr;
970 
971   auto *UT = cast<VectorType>(U->getType());
972   auto *VT = V->getType();
973   // Check that types UT and VT are bitwise isomorphic.
974   const auto &DL = IC.getDataLayout();
975   if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
976     return nullptr;
977   }
978   if (auto *AT = dyn_cast<ArrayType>(VT)) {
979     if (AT->getNumElements() != UT->getNumElements())
980       return nullptr;
981   } else {
982     auto *ST = cast<StructType>(VT);
983     if (ST->getNumElements() != UT->getNumElements())
984       return nullptr;
985     for (const auto *EltT : ST->elements()) {
986       if (EltT != UT->getElementType())
987         return nullptr;
988     }
989   }
990   return U;
991 }
992 
993 /// \brief Combine stores to match the type of value being stored.
994 ///
995 /// The core idea here is that the memory does not have any intrinsic type and
996 /// where we can we should match the type of a store to the type of value being
997 /// stored.
998 ///
999 /// However, this routine must never change the width of a store or the number of
1000 /// stores as that would introduce a semantic change. This combine is expected to
1001 /// be a semantic no-op which just allows stores to more closely model the types
1002 /// of their incoming values.
1003 ///
1004 /// Currently, we also refuse to change the precise type used for an atomic or
1005 /// volatile store. This is debatable, and might be reasonable to change later.
1006 /// However, it is risky in case some backend or other part of LLVM is relying
1007 /// on the exact type stored to select appropriate atomic operations.
1008 ///
1009 /// \returns true if the store was successfully combined away. This indicates
1010 /// the caller must erase the store instruction. We have to let the caller erase
1011 /// the store instruction as otherwise there is no way to signal whether it was
1012 /// combined or not: IC.EraseInstFromFunction returns a null pointer.
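///
/// For illustration only, a store of a bitcast value
///
///     %v = bitcast i64 %x to double
///     store double %v, double* %p
///
/// is rewritten to store the original type instead:
///
///     %p.cast = bitcast double* %p to i64*
///     store i64 %x, i64* %p.cast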
1013 static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle both volatile and ordered atomic stores
  // here with some care, but it isn't clear that this is important.
1016   if (!SI.isUnordered())
1017     return false;
1018 
1019   // swifterror values can't be bitcasted.
1020   if (SI.getPointerOperand()->isSwiftError())
1021     return false;
1022 
1023   Value *V = SI.getValueOperand();
1024 
1025   // Fold away bit casts of the stored value by storing the original type.
1026   if (auto *BC = dyn_cast<BitCastInst>(V)) {
1027     V = BC->getOperand(0);
1028     combineStoreToNewValue(IC, SI, V);
1029     return true;
1030   }
1031 
1032   if (Value *U = likeBitCastFromVector(IC, V)) {
1033     combineStoreToNewValue(IC, SI, U);
1034     return true;
1035   }
1036 
1037   // FIXME: We should also canonicalize stores of vectors when their elements
1038   // are cast to other types.
1039   return false;
1040 }
1041 
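// Unpack a store of an aggregate value into stores of its elements. For
// illustration only, given a padding-free %st = type { i32, i32 }, the store
//   store %st %s, %st* %p
// becomes roughly
//   %p0 = getelementptr inbounds %st, %st* %p, i32 0, i32 0
//   %e0 = extractvalue %st %s, 0
//   store i32 %e0, i32* %p0
//   %p1 = getelementptr inbounds %st, %st* %p, i32 0, i32 1
//   %e1 = extractvalue %st %s, 1
//   store i32 %e1, i32* %p1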
1042 static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle both volatile and atomic stores here with
  // some care, but it isn't clear that this is important.
1045   if (!SI.isSimple())
1046     return false;
1047 
1048   Value *V = SI.getValueOperand();
1049   Type *T = V->getType();
1050 
1051   if (!T->isAggregateType())
1052     return false;
1053 
1054   if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
1056     unsigned Count = ST->getNumElements();
1057     if (Count == 1) {
1058       V = IC.Builder->CreateExtractValue(V, 0);
1059       combineStoreToNewValue(IC, SI, V);
1060       return true;
1061     }
1062 
    // We don't want to break up stores of padded structs here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
1065     const DataLayout &DL = IC.getDataLayout();
1066     auto *SL = DL.getStructLayout(ST);
1067     if (SL->hasPadding())
1068       return false;
1069 
1070     auto Align = SI.getAlignment();
1071     if (!Align)
1072       Align = DL.getABITypeAlignment(ST);
1073 
1074     SmallString<16> EltName = V->getName();
1075     EltName += ".elt";
1076     auto *Addr = SI.getPointerOperand();
1077     SmallString<16> AddrName = Addr->getName();
1078     AddrName += ".repack";
1079 
1080     auto *IdxType = Type::getInt32Ty(ST->getContext());
1081     auto *Zero = ConstantInt::get(IdxType, 0);
1082     for (unsigned i = 0; i < Count; i++) {
1083       Value *Indices[2] = {
1084         Zero,
1085         ConstantInt::get(IdxType, i),
1086       };
1087       auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
1088                                                 AddrName);
1089       auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
1090       auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
1091       IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
1092     }
1093 
1094     return true;
1095   }
1096 
1097   if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
1099     auto NumElements = AT->getNumElements();
1100     if (NumElements == 1) {
1101       V = IC.Builder->CreateExtractValue(V, 0);
1102       combineStoreToNewValue(IC, SI, V);
1103       return true;
1104     }
1105 
1106     // Bail out if the array is too large. Ideally we would like to optimize
1107     // arrays of arbitrary size but this has a terrible impact on compile time.
    // The threshold here is chosen arbitrarily and may need a little bit of
    // tuning.
1110     if (NumElements > 1024)
1111       return false;
1112 
1113     const DataLayout &DL = IC.getDataLayout();
1114     auto EltSize = DL.getTypeAllocSize(AT->getElementType());
1115     auto Align = SI.getAlignment();
1116     if (!Align)
1117       Align = DL.getABITypeAlignment(T);
1118 
1119     SmallString<16> EltName = V->getName();
1120     EltName += ".elt";
1121     auto *Addr = SI.getPointerOperand();
1122     SmallString<16> AddrName = Addr->getName();
1123     AddrName += ".repack";
1124 
1125     auto *IdxType = Type::getInt64Ty(T->getContext());
1126     auto *Zero = ConstantInt::get(IdxType, 0);
1127 
1128     uint64_t Offset = 0;
1129     for (uint64_t i = 0; i < NumElements; i++) {
1130       Value *Indices[2] = {
1131         Zero,
1132         ConstantInt::get(IdxType, i),
1133       };
1134       auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
1135                                                 AddrName);
1136       auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
1137       auto EltAlign = MinAlign(Align, Offset);
1138       IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
1139       Offset += EltSize;
1140     }
1141 
1142     return true;
1143   }
1144 
1145   return false;
1146 }
1147 
1148 /// equivalentAddressValues - Test if A and B will obviously have the same
1149 /// value. This includes recognizing that %t0 and %t1 will have the same
1150 /// value in code like this:
1151 ///   %t0 = getelementptr \@a, 0, 3
1152 ///   store i32 0, i32* %t0
1153 ///   %t1 = getelementptr \@a, 0, 3
1154 ///   %t2 = load i32* %t1
1155 ///
1156 static bool equivalentAddressValues(Value *A, Value *B) {
1157   // Test if the values are trivially equivalent.
1158   if (A == B) return true;
1159 
  // Test if the values come from identical arithmetic instructions.
1161   // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
1163   // means that they'll always either have the same value or one of them
1164   // will have an undefined value.
1165   if (isa<BinaryOperator>(A) ||
1166       isa<CastInst>(A) ||
1167       isa<PHINode>(A) ||
1168       isa<GetElementPtrInst>(A))
1169     if (Instruction *BI = dyn_cast<Instruction>(B))
1170       if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1171         return true;
1172 
1173   // Otherwise they may not be equivalent.
1174   return false;
1175 }
1176 
1177 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
1178   Value *Val = SI.getOperand(0);
1179   Value *Ptr = SI.getOperand(1);
1180 
1181   // Try to canonicalize the stored type.
1182   if (combineStoreToValueType(*this, SI))
1183     return eraseInstFromFunction(SI);
1184 
1185   // Attempt to improve the alignment.
1186   unsigned KnownAlign = getOrEnforceKnownAlignment(
1187       Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
1188   unsigned StoreAlign = SI.getAlignment();
1189   unsigned EffectiveStoreAlign =
1190       StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
1191 
1192   if (KnownAlign > EffectiveStoreAlign)
1193     SI.setAlignment(KnownAlign);
1194   else if (StoreAlign == 0)
1195     SI.setAlignment(EffectiveStoreAlign);
1196 
1197   // Try to canonicalize the stored type.
1198   if (unpackStoreToAggregate(*this, SI))
1199     return eraseInstFromFunction(SI);
1200 
1201   // Replace GEP indices if possible.
1202   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
1203       Worklist.Add(NewGEPI);
1204       return &SI;
1205   }
1206 
1207   // Don't hack volatile/ordered stores.
1208   // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1209   if (!SI.isUnordered()) return nullptr;
1210 
1211   // If the RHS is an alloca with a single use, zapify the store, making the
1212   // alloca dead.
1213   if (Ptr->hasOneUse()) {
1214     if (isa<AllocaInst>(Ptr))
1215       return eraseInstFromFunction(SI);
1216     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1217       if (isa<AllocaInst>(GEP->getOperand(0))) {
1218         if (GEP->getOperand(0)->hasOneUse())
1219           return eraseInstFromFunction(SI);
1220       }
1221     }
1222   }
1223 
1224   // Do really simple DSE, to catch cases where there are several consecutive
1225   // stores to the same location, separated by a few arithmetic operations. This
1226   // situation often occurs with bitfield accesses.
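  //
  // For illustration only, in
  //   store i32 %a, i32* %p
  //   %x = add i32 %a, 1        ; no intervening reads or writes of memory
  //   store i32 %b, i32* %p
  // the first store is dead and is removed here.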
1227   BasicBlock::iterator BBI(SI);
1228   for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1229        --ScanInsts) {
1230     --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
1233     if (isa<DbgInfoIntrinsic>(BBI) ||
1234         (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1235       ScanInsts++;
1236       continue;
1237     }
1238 
1239     if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1240       // Prev store isn't volatile, and stores to the same location?
1241       if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
1242                                                         SI.getOperand(1))) {
1243         ++NumDeadStore;
1244         ++BBI;
1245         eraseInstFromFunction(*PrevSI);
1246         continue;
1247       }
1248       break;
1249     }
1250 
    // If this is a load, we have to stop.  However, if the loaded value is
    // from the pointer we're storing to and is the value being stored, then
    // *this* store is dead (X = load P; store X -> P).
1254     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1255       if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1256         assert(SI.isUnordered() && "can't eliminate ordering operation");
1257         return eraseInstFromFunction(SI);
1258       }
1259 
1260       // Otherwise, this is a load from some other location.  Stores before it
1261       // may not be dead.
1262       break;
1263     }
1264 
1265     // Don't skip over loads or things that can modify memory.
1266     if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
1267       break;
1268   }
1269 
1270   // store X, null    -> turns into 'unreachable' in SimplifyCFG
1271   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
1272     if (!isa<UndefValue>(Val)) {
1273       SI.setOperand(0, UndefValue::get(Val->getType()));
1274       if (Instruction *U = dyn_cast<Instruction>(Val))
1275         Worklist.Add(U);  // Dropped a use.
1276     }
1277     return nullptr;  // Do not modify these!
1278   }
1279 
1280   // store undef, Ptr -> noop
1281   if (isa<UndefValue>(Val))
1282     return eraseInstFromFunction(SI);
1283 
1284   // If this store is the last instruction in the basic block (possibly
1285   // excepting debug info instructions), and if the block ends with an
1286   // unconditional branch, try to move it to the successor block.
1287   BBI = SI.getIterator();
1288   do {
1289     ++BBI;
1290   } while (isa<DbgInfoIntrinsic>(BBI) ||
1291            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
1292   if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
1293     if (BI->isUnconditional())
1294       if (SimplifyStoreAtEndOfBlock(SI))
1295         return nullptr;  // xform done!
1296 
1297   return nullptr;
1298 }
1299 
1300 /// SimplifyStoreAtEndOfBlock - Turn things like:
1301 ///   if () { *P = v1; } else { *P = v2 }
1302 /// into a phi node with a store in the successor.
1303 ///
1304 /// Simplify things like:
1305 ///   *P = v1; if () { *P = v2; }
1306 /// into a phi node with a store in the successor.
1307 ///
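/// For illustration only, given
///
///   then:
///     store i32 1, i32* %P
///     br label %end
///   else:
///     store i32 2, i32* %P
///     br label %end
///
/// the two stores are merged into the common successor:
///
///   end:
///     %storemerge = phi i32 [ 1, %then ], [ 2, %else ]
///     store i32 %storemerge, i32* %P
///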
1308 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
1309   assert(SI.isUnordered() &&
         "this code has not been audited for volatile or ordered store case");
1311 
1312   BasicBlock *StoreBB = SI.getParent();
1313 
1314   // Check to see if the successor block has exactly two incoming edges.  If
1315   // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
1317   BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1318 
1319   // Determine whether Dest has exactly two predecessors and, if so, compute
1320   // the other predecessor.
1321   pred_iterator PI = pred_begin(DestBB);
1322   BasicBlock *P = *PI;
1323   BasicBlock *OtherBB = nullptr;
1324 
1325   if (P != StoreBB)
1326     OtherBB = P;
1327 
1328   if (++PI == pred_end(DestBB))
1329     return false;
1330 
1331   P = *PI;
1332   if (P != StoreBB) {
1333     if (OtherBB)
1334       return false;
1335     OtherBB = P;
1336   }
1337   if (++PI != pred_end(DestBB))
1338     return false;
1339 
1340   // Bail out if all the relevant blocks aren't distinct (this can happen,
1341   // for example, if SI is in an infinite loop)
1342   if (StoreBB == DestBB || OtherBB == DestBB)
1343     return false;
1344 
1345   // Verify that the other block ends in a branch and is not otherwise empty.
1346   BasicBlock::iterator BBI(OtherBB->getTerminator());
1347   BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
1348   if (!OtherBr || BBI == OtherBB->begin())
1349     return false;
1350 
1351   // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
1353   StoreInst *OtherStore = nullptr;
1354   if (OtherBr->isUnconditional()) {
1355     --BBI;
1356     // Skip over debugging info.
1357     while (isa<DbgInfoIntrinsic>(BBI) ||
1358            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1359       if (BBI==OtherBB->begin())
1360         return false;
1361       --BBI;
1362     }
1363     // If this isn't a store, isn't a store to the same location, or is not the
1364     // right kind of store, bail out.
1365     OtherStore = dyn_cast<StoreInst>(BBI);
1366     if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
1367         !SI.isSameOperationAs(OtherStore))
1368       return false;
1369   } else {
1370     // Otherwise, the other block ended with a conditional branch. If one of the
1371     // destinations is StoreBB, then we have the if/then case.
1372     if (OtherBr->getSuccessor(0) != StoreBB &&
1373         OtherBr->getSuccessor(1) != StoreBB)
1374       return false;
1375 
1376     // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1377     // if/then triangle.  See if there is a store to the same ptr as SI that
1378     // lives in OtherBB.
1379     for (;; --BBI) {
1380       // Check to see if we find the matching store.
1381       if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
1382         if (OtherStore->getOperand(1) != SI.getOperand(1) ||
1383             !SI.isSameOperationAs(OtherStore))
1384           return false;
1385         break;
1386       }
1387       // If we find something that may be using or overwriting the stored
1388       // value, or if we run out of instructions, we can't do the xform.
1389       if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
1390           BBI == OtherBB->begin())
1391         return false;
1392     }
1393 
1394     // In order to eliminate the store in OtherBr, we have to
1395     // make sure nothing reads or overwrites the stored value in
1396     // StoreBB.
1397     for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1398       // FIXME: This should really be AA driven.
1399       if (I->mayReadFromMemory() || I->mayWriteToMemory())
1400         return false;
1401     }
1402   }
1403 
1404   // Insert a PHI node now if we need it.
1405   Value *MergedVal = OtherStore->getOperand(0);
1406   if (MergedVal != SI.getOperand(0)) {
1407     PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
1408     PN->addIncoming(SI.getOperand(0), SI.getParent());
1409     PN->addIncoming(OtherStore->getOperand(0), OtherBB);
1410     MergedVal = InsertNewInstBefore(PN, DestBB->front());
1411   }
1412 
1413   // Advance to a place where it is safe to insert the new store and
1414   // insert it.
1415   BBI = DestBB->getFirstInsertionPt();
1416   StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
1417                                    SI.isVolatile(),
1418                                    SI.getAlignment(),
1419                                    SI.getOrdering(),
1420                                    SI.getSynchScope());
1421   InsertNewInstBefore(NewSI, *BBI);
1422   NewSI->setDebugLoc(OtherStore->getDebugLoc());
1423 
1424   // If the two stores had AA tags, merge them.
1425   AAMDNodes AATags;
1426   SI.getAAMetadata(AATags);
1427   if (AATags) {
1428     OtherStore->getAAMetadata(AATags, /* Merge = */ true);
1429     NewSI->setAAMetadata(AATags);
1430   }
1431 
1432   // Nuke the old stores.
1433   eraseInstFromFunction(SI);
1434   eraseInstFromFunction(*OtherStore);
1435   return true;
1436 }
1437