1 //===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visit functions for load, store and alloca.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/Loads.h"
18 #include "llvm/IR/DataLayout.h"
19 #include "llvm/IR/LLVMContext.h"
20 #include "llvm/IR/IntrinsicInst.h"
21 #include "llvm/IR/MDBuilder.h"
22 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
23 #include "llvm/Transforms/Utils/Local.h"
24 using namespace llvm;
25 
26 #define DEBUG_TYPE "instcombine"
27 
28 STATISTIC(NumDeadStore,    "Number of dead stores eliminated");
29 STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
30 
31 /// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
32 /// some part of a constant global variable.  This intentionally only accepts
33 /// constant expressions because we can't rewrite arbitrary instructions.
34 static bool pointsToConstantGlobal(Value *V) {
35   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
36     return GV->isConstant();
37 
38   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
39     if (CE->getOpcode() == Instruction::BitCast ||
40         CE->getOpcode() == Instruction::AddrSpaceCast ||
41         CE->getOpcode() == Instruction::GetElementPtr)
42       return pointsToConstantGlobal(CE->getOperand(0));
43   }
44   return false;
45 }
46 
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer and return false if
/// we see any stores or other unknown uses.  If we see pointer arithmetic,
/// keep track of whether it moves the pointer (with IsOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
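///
/// For example (an illustrative sketch; all names are hypothetical):
///   @cst = private unnamed_addr constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
///   ...
///   %a = alloca [4 x i32]
///   %a.i8 = bitcast [4 x i32]* %a to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a.i8,
///       i8* bitcast ([4 x i32]* @cst to i8*), i64 16, i32 4, i1 false)
/// If %a is otherwise only read, all of its uses can be rewritten to use @cst.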
54 static bool
55 isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
56                                SmallVectorImpl<Instruction *> &ToDelete) {
57   // We track lifetime intrinsics as we encounter them.  If we decide to go
58   // ahead and replace the value with the global, this lets the caller quickly
59   // eliminate the markers.
60 
61   SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
62   ValuesToInspect.push_back(std::make_pair(V, false));
63   while (!ValuesToInspect.empty()) {
64     auto ValuePair = ValuesToInspect.pop_back_val();
65     const bool IsOffset = ValuePair.second;
66     for (auto &U : ValuePair.first->uses()) {
67       Instruction *I = cast<Instruction>(U.getUser());
68 
69       if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore simple (non-volatile, non-atomic) loads; they are always ok.
71         if (!LI->isSimple()) return false;
72         continue;
73       }
74 
75       if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
76         // If uses of the bitcast are ok, we are ok.
77         ValuesToInspect.push_back(std::make_pair(I, IsOffset));
78         continue;
79       }
80       if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer;
        // otherwise, it does.
83         ValuesToInspect.push_back(
84             std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
85         continue;
86       }
87 
88       if (auto CS = CallSite(I)) {
89         // If this is the function being called then we treat it like a load and
90         // ignore it.
91         if (CS.isCallee(&U))
92           continue;
93 
94         unsigned DataOpNo = CS.getDataOperandNo(&U);
95         bool IsArgOperand = CS.isArgOperand(&U);
96 
97         // Inalloca arguments are clobbered by the call.
98         if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
99           return false;
100 
101         // If this is a readonly/readnone call site, then we know it is just a
102         // load (but one that potentially returns the value itself), so we can
103         // ignore it if we know that the value isn't captured.
104         if (CS.onlyReadsMemory() &&
105             (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
106           continue;
107 
108         // If this is being passed as a byval argument, the caller is making a
109         // copy, so it is only a read of the alloca.
110         if (IsArgOperand && CS.isByValArgument(DataOpNo))
111           continue;
112       }
113 
114       // Lifetime intrinsics can be handled by the caller.
115       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
116         if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
117             II->getIntrinsicID() == Intrinsic::lifetime_end) {
118           assert(II->use_empty() && "Lifetime markers have no result to use!");
119           ToDelete.push_back(II);
120           continue;
121         }
122       }
123 
      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
126       MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
127       if (!MI)
128         return false;
129 
130       // If the transfer is using the alloca as a source of the transfer, then
131       // ignore it since it is a load (unless the transfer is volatile).
132       if (U.getOperandNo() == 1) {
133         if (MI->isVolatile()) return false;
134         continue;
135       }
136 
137       // If we already have seen a copy, reject the second one.
138       if (TheCopy) return false;
139 
140       // If the pointer has been offset from the start of the alloca, we can't
141       // safely handle this.
142       if (IsOffset) return false;
143 
144       // If the memintrinsic isn't using the alloca as the dest, reject it.
145       if (U.getOperandNo() != 0) return false;
146 
147       // If the source of the memcpy/move is not a constant global, reject it.
148       if (!pointsToConstantGlobal(MI->getSource()))
149         return false;
150 
151       // Otherwise, the transform is safe.  Remember the copy instruction.
152       TheCopy = MI;
153     }
154   }
155   return true;
156 }
157 
158 /// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
159 /// modified by a copy from a constant global.  If we can prove this, we can
160 /// replace any uses of the alloca with uses of the global directly.
161 static MemTransferInst *
162 isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
163                                SmallVectorImpl<Instruction *> &ToDelete) {
164   MemTransferInst *TheCopy = nullptr;
165   if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
166     return TheCopy;
167   return nullptr;
168 }
169 
170 static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
171   // Check for array size of 1 (scalar allocation).
172   if (!AI.isArrayAllocation()) {
173     // i32 1 is the canonical array size for scalar allocations.
174     if (AI.getArraySize()->getType()->isIntegerTy(32))
175       return nullptr;
176 
177     // Canonicalize it.
178     Value *V = IC.Builder->getInt32(1);
179     AI.setOperand(0, V);
180     return &AI;
181   }
182 
183   // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
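  // For example (illustrative; the exact names are assigned by the builder):
  //   %a = alloca i32, i32 4
  // becomes
  //   %a1 = alloca [4 x i32]
  //   %a1.sub = getelementptr inbounds [4 x i32], [4 x i32]* %a1, i64 0, i64 0
  // and all uses of %a are rewritten to use %a1.sub.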
184   if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
185     Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
186     AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
187     New->setAlignment(AI.getAlignment());
188 
    // Scan to the end of the allocation instructions to skip over a block of
    // allocas, if possible.  Also skip interleaved debug info.
191     //
192     BasicBlock::iterator It(New);
193     while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
194       ++It;
195 
    // Now that It points to the first non-allocation instruction in the block,
    // insert our getelementptr instruction...
198     //
199     Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
200     Value *NullIdx = Constant::getNullValue(IdxTy);
201     Value *Idx[2] = {NullIdx, NullIdx};
202     Instruction *GEP =
203         GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
204     IC.InsertNewInstBefore(GEP, *It);
205 
206     // Now make everything use the getelementptr instead of the original
207     // allocation.
208     return IC.ReplaceInstUsesWith(AI, GEP);
209   }
210 
211   if (isa<UndefValue>(AI.getArraySize()))
212     return IC.ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
213 
214   // Ensure that the alloca array size argument has type intptr_t, so that
215   // any casting is exposed early.
216   Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
217   if (AI.getArraySize()->getType() != IntPtrTy) {
218     Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
219     AI.setOperand(0, V);
220     return &AI;
221   }
222 
223   return nullptr;
224 }
225 
226 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
227   if (auto *I = simplifyAllocaArraySize(*this, AI))
228     return I;
229 
230   if (AI.getAllocatedType()->isSized()) {
231     // If the alignment is 0 (unspecified), assign it the preferred alignment.
232     if (AI.getAlignment() == 0)
233       AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
234 
    // Move all allocas of zero-byte objects to the entry block and merge them
    // together.  Note that we only do this for allocas, because malloc should
    // allocate and return a unique pointer, even for a zero-byte allocation.
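    // For example (illustrative), two zero-sized allocas
    //   %a = alloca [0 x i32]
    //   %b = alloca [0 x i32]
    // in different blocks end up merged into one alloca in the entry block.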
238     if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
239       // For a zero sized alloca there is no point in doing an array allocation.
240       // This is helpful if the array size is a complicated expression not used
241       // elsewhere.
242       if (AI.isArrayAllocation()) {
243         AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
244         return &AI;
245       }
246 
247       // Get the first instruction in the entry block.
248       BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
249       Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
250       if (FirstInst != &AI) {
251         // If the entry block doesn't start with a zero-size alloca then move
252         // this one to the start of the entry block.  There is no problem with
253         // dominance as the array size was forced to a constant earlier already.
254         AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
255         if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
256             DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
257           AI.moveBefore(FirstInst);
258           return &AI;
259         }
260 
261         // If the alignment of the entry block alloca is 0 (unspecified),
262         // assign it the preferred alignment.
263         if (EntryAI->getAlignment() == 0)
264           EntryAI->setAlignment(
265               DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
266         // Replace this zero-sized alloca with the one at the start of the entry
267         // block after ensuring that the address will be aligned enough for both
268         // types.
269         unsigned MaxAlign = std::max(EntryAI->getAlignment(),
270                                      AI.getAlignment());
271         EntryAI->setAlignment(MaxAlign);
272         if (AI.getType() != EntryAI->getType())
273           return new BitCastInst(EntryAI, AI.getType());
274         return ReplaceInstUsesWith(AI, EntryAI);
275       }
276     }
277   }
278 
279   if (AI.getAlignment()) {
280     // Check to see if this allocation is only modified by a memcpy/memmove from
281     // a constant global whose alignment is equal to or exceeds that of the
282     // allocation.  If this is the case, we can change all users to use
283     // the constant global instead.  This is commonly produced by the CFE by
284     // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
285     // is only subsequently read.
286     SmallVector<Instruction *, 4> ToDelete;
287     if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
288       unsigned SourceAlign = getOrEnforceKnownAlignment(
289           Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
290       if (AI.getAlignment() <= SourceAlign) {
291         DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
292         DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
293         for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
294           EraseInstFromFunction(*ToDelete[i]);
295         Constant *TheSrc = cast<Constant>(Copy->getSource());
296         Constant *Cast
297           = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
298         Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
299         EraseInstFromFunction(*Copy);
300         ++NumGlobalCopies;
301         return NewI;
302       }
303     }
304   }
305 
306   // At last, use the generic allocation site handler to aggressively remove
307   // unused allocas.
308   return visitAllocSite(AI);
309 }
310 
311 /// \brief Helper to combine a load to a new type.
312 ///
313 /// This just does the work of combining a load to a new type. It handles
314 /// metadata, etc., and returns the new instruction. The \c NewTy should be the
315 /// loaded *value* type. This will convert it to a pointer, cast the operand to
316 /// that pointer type, load it, etc.
317 ///
318 /// Note that this will create all of the instructions with whatever insert
319 /// point the \c InstCombiner currently is using.
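///
/// For example (an illustrative sketch; %p.cast is a hypothetical name),
/// rewriting an i64 load as a double load:
///   %v = load i64, i64* %p
/// becomes
///   %p.cast = bitcast i64* %p to double*
///   %v = load double, double* %p.cast
/// with the original load's metadata transferred as described below.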
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
322   Value *Ptr = LI.getPointerOperand();
323   unsigned AS = LI.getPointerAddressSpace();
324   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
325   LI.getAllMetadata(MD);
326 
327   LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
328       IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
329       LI.getAlignment(), LI.getName() + Suffix);
330   MDBuilder MDB(NewLoad->getContext());
331   for (const auto &MDPair : MD) {
332     unsigned ID = MDPair.first;
333     MDNode *N = MDPair.second;
334     // Note, essentially every kind of metadata should be preserved here! This
335     // routine is supposed to clone a load instruction changing *only its type*.
336     // The only metadata it makes sense to drop is metadata which is invalidated
337     // when the pointer type changes. This should essentially never be the case
338     // in LLVM, but we explicitly switch over only known metadata to be
339     // conservatively correct. If you are adding metadata to LLVM which pertains
340     // to loads, you almost certainly want to add it here.
341     switch (ID) {
342     case LLVMContext::MD_dbg:
343     case LLVMContext::MD_tbaa:
344     case LLVMContext::MD_prof:
345     case LLVMContext::MD_fpmath:
346     case LLVMContext::MD_tbaa_struct:
347     case LLVMContext::MD_invariant_load:
348     case LLVMContext::MD_alias_scope:
349     case LLVMContext::MD_noalias:
350     case LLVMContext::MD_nontemporal:
351     case LLVMContext::MD_mem_parallel_loop_access:
352       // All of these directly apply.
353       NewLoad->setMetadata(ID, N);
354       break;
355 
356     case LLVMContext::MD_nonnull:
357       // This only directly applies if the new type is also a pointer.
358       if (NewTy->isPointerTy()) {
359         NewLoad->setMetadata(ID, N);
360         break;
361       }
362       // If it's integral now, translate it to !range metadata.
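      // For example (illustrative), a !nonnull pointer load rewritten as an
      // i64 load gets !range !{i64 1, i64 0}, i.e. any value except null (0).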
363       if (NewTy->isIntegerTy()) {
364         auto *ITy = cast<IntegerType>(NewTy);
365         auto *NullInt = ConstantExpr::getPtrToInt(
366             ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
367         auto *NonNullInt =
368             ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
369         NewLoad->setMetadata(LLVMContext::MD_range,
370                              MDB.createRange(NonNullInt, NullInt));
371       }
372       break;
373     case LLVMContext::MD_align:
374     case LLVMContext::MD_dereferenceable:
375     case LLVMContext::MD_dereferenceable_or_null:
376       // These only directly apply if the new type is also a pointer.
377       if (NewTy->isPointerTy())
378         NewLoad->setMetadata(ID, N);
379       break;
380     case LLVMContext::MD_range:
381       // FIXME: It would be nice to propagate this in some way, but the type
382       // conversions make it hard. If the new type is a pointer, we could
383       // translate it to !nonnull metadata.
384       break;
385     }
386   }
387   return NewLoad;
388 }
389 
390 /// \brief Combine a store to a new type.
391 ///
392 /// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
394   Value *Ptr = SI.getPointerOperand();
395   unsigned AS = SI.getPointerAddressSpace();
396   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
397   SI.getAllMetadata(MD);
398 
399   StoreInst *NewStore = IC.Builder->CreateAlignedStore(
400       V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
401       SI.getAlignment());
402   for (const auto &MDPair : MD) {
403     unsigned ID = MDPair.first;
404     MDNode *N = MDPair.second;
405     // Note, essentially every kind of metadata should be preserved here! This
406     // routine is supposed to clone a store instruction changing *only its
407     // type*. The only metadata it makes sense to drop is metadata which is
408     // invalidated when the pointer type changes. This should essentially
409     // never be the case in LLVM, but we explicitly switch over only known
410     // metadata to be conservatively correct. If you are adding metadata to
411     // LLVM which pertains to stores, you almost certainly want to add it
412     // here.
413     switch (ID) {
414     case LLVMContext::MD_dbg:
415     case LLVMContext::MD_tbaa:
416     case LLVMContext::MD_prof:
417     case LLVMContext::MD_fpmath:
418     case LLVMContext::MD_tbaa_struct:
419     case LLVMContext::MD_alias_scope:
420     case LLVMContext::MD_noalias:
421     case LLVMContext::MD_nontemporal:
422     case LLVMContext::MD_mem_parallel_loop_access:
423       // All of these directly apply.
424       NewStore->setMetadata(ID, N);
425       break;
426 
427     case LLVMContext::MD_invariant_load:
428     case LLVMContext::MD_nonnull:
429     case LLVMContext::MD_range:
430     case LLVMContext::MD_align:
431     case LLVMContext::MD_dereferenceable:
432     case LLVMContext::MD_dereferenceable_or_null:
433       // These don't apply for stores.
434       break;
435     }
436   }
437 
438   return NewStore;
439 }
440 
/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
443 ///
444 /// The core idea here is that if the result of a load is used in an operation,
445 /// we should load the type most conducive to that operation. For example, when
446 /// loading an integer and converting that immediately to a pointer, we should
447 /// instead directly load a pointer.
448 ///
/// However, this routine must never change the width of a load or the number
/// of loads as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows loads to more closely
/// model the types of their consuming operations.
453 ///
454 /// Currently, we also refuse to change the precise type used for an atomic load
455 /// or a volatile load. This is debatable, and might be reasonable to change
456 /// later. However, it is risky in case some backend or other part of LLVM is
457 /// relying on the exact type loaded to select appropriate atomic operations.
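///
/// For example (an illustrative sketch; %p.cast is a hypothetical name):
///   %x = load i64, i64* %p
///   %y = inttoptr i64 %x to i8*
/// becomes, assuming 64-bit pointers so that the cast is a no-op:
///   %p.cast = bitcast i64* %p to i8**
///   %y = load i8*, i8** %p.cast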
458 static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // loads here, but it isn't clear that this is important.
461   if (!LI.isSimple())
462     return nullptr;
463 
464   if (LI.use_empty())
465     return nullptr;
466 
467   Type *Ty = LI.getType();
468   const DataLayout &DL = IC.getDataLayout();
469 
470   // Try to canonicalize loads which are only ever stored to operate over
471   // integers instead of any other type. We only do this when the loaded type
472   // is sized and has a size exactly the same as its store size and the store
473   // size is a legal integer type.
474   if (!Ty->isIntegerTy() && Ty->isSized() &&
475       DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
476       DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
477     if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
478           auto *SI = dyn_cast<StoreInst>(U);
479           return SI && SI->getPointerOperand() != &LI;
480         })) {
481       LoadInst *NewLoad = combineLoadToNewType(
482           IC, LI,
483           Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
484       // Replace all the stores with stores of the newly loaded value.
485       for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
486         auto *SI = cast<StoreInst>(*UI++);
487         IC.Builder->SetInsertPoint(SI);
488         combineStoreToNewValue(IC, *SI, NewLoad);
489         IC.EraseInstFromFunction(*SI);
490       }
491       assert(LI.use_empty() && "Failed to remove all users of the load!");
492       // Return the old load so the combiner can delete it safely.
493       return &LI;
494     }
495   }
496 
497   // Fold away bit casts of the loaded value by loading the desired type.
498   // We can do this for BitCastInsts as well as casts from and to pointer types,
  // as long as those are noops (i.e., the source or dest type has the same
  // bitwidth as the target's pointers).
501   if (LI.hasOneUse())
502     if (auto* CI = dyn_cast<CastInst>(LI.user_back())) {
503       if (CI->isNoopCast(DL)) {
504         LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
505         CI->replaceAllUsesWith(NewLoad);
506         IC.EraseInstFromFunction(*CI);
507         return &LI;
508       }
509     }
510 
511   // FIXME: We should also canonicalize loads of vectors when their elements are
512   // cast to other types.
513   return nullptr;
514 }
515 
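/// \brief Unpack a load of an aggregate type into loads of its elements.
///
/// For example (an illustrative sketch), for a struct with no padding:
///   %v = load { i32, i32 }, { i32, i32 }* %p
/// becomes per-element loads reassembled with insertvalue:
///   %v.elt = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
///   %v.unpack = load i32, i32* %v.elt
///   ... (likewise for element 1)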
516 static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // loads here, but it isn't clear that this is important.
519   if (!LI.isSimple())
520     return nullptr;
521 
522   Type *T = LI.getType();
523   if (!T->isAggregateType())
524     return nullptr;
525 
526   assert(LI.getAlignment() && "Alignment must be set at this point");
527 
528   if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack it.
530     unsigned Count = ST->getNumElements();
531     if (Count == 1) {
532       LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
533                                                ".unpack");
534       return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
535         UndefValue::get(T), NewLoad, 0, LI.getName()));
536     }
537 
    // We don't want to break loads with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
540     const DataLayout &DL = IC.getDataLayout();
541     auto *SL = DL.getStructLayout(ST);
542     if (SL->hasPadding())
543       return nullptr;
544 
545     auto Name = LI.getName();
546     SmallString<16> LoadName = Name;
547     LoadName += ".unpack";
548     SmallString<16> EltName = Name;
549     EltName += ".elt";
550     auto *Addr = LI.getPointerOperand();
551     Value *V = UndefValue::get(T);
552     auto *IdxType = Type::getInt32Ty(ST->getContext());
553     auto *Zero = ConstantInt::get(IdxType, 0);
554     for (unsigned i = 0; i < Count; i++) {
555       Value *Indices[2] = {
556         Zero,
557         ConstantInt::get(IdxType, i),
558       };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr,
                                                makeArrayRef(Indices), EltName);
560       auto *L = IC.Builder->CreateLoad(ST->getTypeAtIndex(i), Ptr, LoadName);
561       V = IC.Builder->CreateInsertValue(V, L, i);
562     }
563 
564     V->setName(Name);
565     return IC.ReplaceInstUsesWith(LI, V);
566   }
567 
568   if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, we unpack it.
570     if (AT->getNumElements() == 1) {
571       LoadInst *NewLoad = combineLoadToNewType(IC, LI, AT->getElementType(),
572                                                ".unpack");
573       return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
574         UndefValue::get(T), NewLoad, 0, LI.getName()));
575     }
576   }
577 
578   return nullptr;
579 }
580 
// If we can determine that all possible objects pointed to by the provided
// pointer value are not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false. Constant globals and allocas of known size fall into the former
// category.
585 //
586 // FIXME: This should probably live in ValueTracking (or similar).
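// For example (illustrative), assuming 32-bit i32:
//   %a = alloca [2 x i32]
// has a known object size of 8 bytes, so this returns true iff MaxSize >= 8.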
587 static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
588                                      const DataLayout &DL) {
589   SmallPtrSet<Value *, 4> Visited;
590   SmallVector<Value *, 4> Worklist(1, V);
591 
592   do {
593     Value *P = Worklist.pop_back_val();
594     P = P->stripPointerCasts();
595 
596     if (!Visited.insert(P).second)
597       continue;
598 
599     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
600       Worklist.push_back(SI->getTrueValue());
601       Worklist.push_back(SI->getFalseValue());
602       continue;
603     }
604 
605     if (PHINode *PN = dyn_cast<PHINode>(P)) {
606       for (Value *IncValue : PN->incoming_values())
607         Worklist.push_back(IncValue);
608       continue;
609     }
610 
611     if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
612       if (GA->mayBeOverridden())
613         return false;
614       Worklist.push_back(GA->getAliasee());
615       continue;
616     }
617 
    // If we know how big this object is, and it is no larger than MaxSize,
    // continue searching. Otherwise, return false.
620     if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
621       if (!AI->getAllocatedType()->isSized())
622         return false;
623 
624       ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
625       if (!CS)
626         return false;
627 
628       uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128) * APInt(128, TypeSize)).ugt(MaxSize))
632         return false;
633       continue;
634     }
635 
636     if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
637       if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
638         return false;
639 
640       uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
641       if (InitSize > MaxSize)
642         return false;
643       continue;
644     }
645 
646     return false;
647   } while (!Worklist.empty());
648 
649   return true;
650 }
651 
// If we're indexing into an object of a known size, and the outer index is
// not a constant, but any value other than zero would lead to undefined
// behavior, then replace it with zero.
655 //
656 // For example, if we have:
657 // @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
658 // ...
659 // %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
660 // ... = load i32* %arrayidx, align 4
661 // Then we know that we can replace %x in the GEP with i64 0.
662 //
// FIXME: We could fold to zero any GEP index that would cause UB if it were
// not zero. Currently, we only handle the first such index. We could also
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
667 static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
668                                      Instruction *MemI, unsigned &Idx) {
669   if (GEPI->getNumOperands() < 2)
670     return false;
671 
672   // Find the first non-zero index of a GEP. If all indices are zero, return
673   // one past the last index.
674   auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
675     unsigned I = 1;
676     for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
677       Value *V = GEPI->getOperand(I);
678       if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
679         if (CI->isZero())
680           continue;
681 
682       break;
683     }
684 
685     return I;
686   };
687 
688   // Skip through initial 'zero' indices, and find the corresponding pointer
689   // type. See if the next index is not a constant.
690   Idx = FirstNZIdx(GEPI);
691   if (Idx == GEPI->getNumOperands())
692     return false;
693   if (isa<Constant>(GEPI->getOperand(Idx)))
694     return false;
695 
696   SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
697   Type *AllocTy =
698     GetElementPtrInst::getIndexedType(GEPI->getSourceElementType(), Ops);
699   if (!AllocTy || !AllocTy->isSized())
700     return false;
701   const DataLayout &DL = IC.getDataLayout();
702   uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
703 
704   // If there are more indices after the one we might replace with a zero, make
705   // sure they're all non-negative. If any of them are negative, the overall
706   // address being computed might be before the base address determined by the
707   // first non-zero index.
708   auto IsAllNonNegative = [&]() {
709     for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
710       bool KnownNonNegative, KnownNegative;
711       IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
712                         KnownNegative, 0, MemI);
713       if (KnownNonNegative)
714         continue;
715       return false;
716     }
717 
718     return true;
719   };
720 
721   // FIXME: If the GEP is not inbounds, and there are extra indices after the
722   // one we'll replace, those could cause the address computation to wrap
723   // (rendering the IsAllNonNegative() check below insufficient). We can do
724   // better, ignoring zero indices (and other indices we can prove small
725   // enough not to wrap).
726   if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
727     return false;
728 
729   // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
730   // also known to be dereferenceable.
731   return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
732          IsAllNonNegative();
733 }
734 
735 // If we're indexing into an object with a variable index for the memory
736 // access, but the object has only one element, we can assume that the index
737 // will always be zero. If we replace the GEP, return it.
738 template <typename T>
739 static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
740                                           T &MemI) {
741   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
742     unsigned Idx;
743     if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
744       Instruction *NewGEPI = GEPI->clone();
745       NewGEPI->setOperand(Idx,
746         ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
747       NewGEPI->insertBefore(GEPI);
748       MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
749       return NewGEPI;
750     }
751   }
752 
753   return nullptr;
754 }
755 
756 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
757   Value *Op = LI.getOperand(0);
758 
759   // Try to canonicalize the loaded type.
760   if (Instruction *Res = combineLoadToOperationType(*this, LI))
761     return Res;
762 
763   // Attempt to improve the alignment.
764   unsigned KnownAlign = getOrEnforceKnownAlignment(
765       Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
766   unsigned LoadAlign = LI.getAlignment();
767   unsigned EffectiveLoadAlign =
768       LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
769 
770   if (KnownAlign > EffectiveLoadAlign)
771     LI.setAlignment(KnownAlign);
772   else if (LoadAlign == 0)
773     LI.setAlignment(EffectiveLoadAlign);
774 
775   // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }
780 
781   // None of the following transforms are legal for volatile/atomic loads.
782   // FIXME: Some of it is okay for atomic loads; needs refactoring.
783   if (!LI.isSimple()) return nullptr;
784 
785   if (Instruction *Res = unpackLoadToAggregate(*this, LI))
786     return Res;
787 
788   // Do really simple store-to-load forwarding and load CSE, to catch cases
789   // where there are several consecutive memory accesses to the same location,
790   // separated by a few arithmetic operations.
791   BasicBlock::iterator BBI(LI);
792   AAMDNodes AATags;
793   if (Value *AvailableVal =
794       FindAvailableLoadedValue(&LI, LI.getParent(), BBI,
795                                DefMaxInstsToScan, AA, &AATags)) {
796     if (LoadInst *NLI = dyn_cast<LoadInst>(AvailableVal)) {
797       unsigned KnownIDs[] = {
798           LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
799           LLVMContext::MD_noalias,         LLVMContext::MD_range,
800           LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
801           LLVMContext::MD_invariant_group, LLVMContext::MD_align,
802           LLVMContext::MD_dereferenceable,
803           LLVMContext::MD_dereferenceable_or_null};
804       combineMetadata(NLI, &LI, KnownIDs);
805     };
806 
807     return ReplaceInstUsesWith(
808         LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
809                                             LI.getName() + ".cast"));
810   }
811 
812   // load(gep null, ...) -> unreachable
813   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
814     const Value *GEPI0 = GEPI->getOperand(0);
815     // TODO: Consider a target hook for valid address spaces for this xform.
816     if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
817       // Insert a new store to null instruction before the load to indicate
818       // that this code is not reachable.  We do this instead of inserting
819       // an unreachable instruction directly because we cannot modify the
820       // CFG.
821       new StoreInst(UndefValue::get(LI.getType()),
822                     Constant::getNullValue(Op->getType()), &LI);
823       return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
824     }
825   }
826 
827   // load null/undef -> unreachable
828   // TODO: Consider a target hook for valid address spaces for this xform.
829   if (isa<UndefValue>(Op) ||
830       (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
831     // Insert a new store to null instruction before the load to indicate that
832     // this code is not reachable.  We do this instead of inserting an
833     // unreachable instruction directly because we cannot modify the CFG.
834     new StoreInst(UndefValue::get(LI.getType()),
835                   Constant::getNullValue(Op->getType()), &LI);
836     return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
837   }
838 
839   if (Op->hasOneUse()) {
840     // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
842     // exposes redundancy in the code.
843     //
844     // Note that we cannot do the transformation unless we know that the
845     // introduced loads cannot trap!  Something like this is valid as long as
846     // the condition is always false: load (select bool %C, int* null, int* %G),
847     // but it would not be valid if we transformed it to load from null
848     // unconditionally.
849     //
850     if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
851       // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
852       unsigned Align = LI.getAlignment();
853       if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, SI) &&
854           isSafeToLoadUnconditionally(SI->getOperand(2), Align, SI)) {
855         LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
856                                            SI->getOperand(1)->getName()+".val");
857         LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
858                                            SI->getOperand(2)->getName()+".val");
859         V1->setAlignment(Align);
860         V2->setAlignment(Align);
861         return SelectInst::Create(SI->getCondition(), V1, V2);
862       }
863 
864       // load (select (cond, null, P)) -> load P
865       if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
866           LI.getPointerAddressSpace() == 0) {
867         LI.setOperand(0, SI->getOperand(2));
868         return &LI;
869       }
870 
871       // load (select (cond, P, null)) -> load P
872       if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
873           LI.getPointerAddressSpace() == 0) {
874         LI.setOperand(0, SI->getOperand(1));
875         return &LI;
876       }
877     }
878   }
879   return nullptr;
880 }
881 
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and,
/// where we can, we should match the type of a store to the type of the value
/// being stored.
///
/// However, this routine must never change the width of a store or the number
/// of stores as that would introduce a semantic change. This combine is
/// expected to be a semantic no-op which just allows stores to more closely
/// model the types of their incoming values.
892 ///
893 /// Currently, we also refuse to change the precise type used for an atomic or
894 /// volatile store. This is debatable, and might be reasonable to change later.
895 /// However, it is risky in case some backend or other part of LLVM is relying
896 /// on the exact type stored to select appropriate atomic operations.
897 ///
898 /// \returns true if the store was successfully combined away. This indicates
899 /// the caller must erase the store instruction. We have to let the caller erase
900 /// the store instruction as otherwise there is no way to signal whether it was
901 /// combined or not: IC.EraseInstFromFunction returns a null pointer.
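///
/// For example (an illustrative sketch; %p.cast is a hypothetical name):
///   %x = bitcast float %f to i32
///   store i32 %x, i32* %p
/// becomes
///   %p.cast = bitcast i32* %p to float*
///   store float %f, float* %p.cast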
902 static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // stores here, but it isn't clear that this is important.
905   if (!SI.isSimple())
906     return false;
907 
908   Value *V = SI.getValueOperand();
909 
910   // Fold away bit casts of the stored value by storing the original type.
911   if (auto *BC = dyn_cast<BitCastInst>(V)) {
912     V = BC->getOperand(0);
913     combineStoreToNewValue(IC, SI, V);
914     return true;
915   }
916 
  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
919   return false;
920 }
921 
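/// \brief Unpack a store of an aggregate type into stores of its elements.
///
/// For example (an illustrative sketch), for a struct with no padding:
///   store { i32, i32 } %v, { i32, i32 }* %p
/// becomes per-element extracts and stores:
///   %v.elt = extractvalue { i32, i32 } %v, 0
///   %p.repack = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i32 0, i32 0
///   store i32 %v.elt, i32* %p.repack
///   ... (likewise for element 1)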
922 static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: With some care, we could probably handle both volatile and atomic
  // stores here, but it isn't clear that this is important.
925   if (!SI.isSimple())
926     return false;
927 
928   Value *V = SI.getValueOperand();
929   Type *T = V->getType();
930 
931   if (!T->isAggregateType())
932     return false;
933 
934   if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct has only one element, we unpack it.
936     unsigned Count = ST->getNumElements();
937     if (Count == 1) {
938       V = IC.Builder->CreateExtractValue(V, 0);
939       combineStoreToNewValue(IC, SI, V);
940       return true;
941     }
942 
    // We don't want to break stores with padding here as we'd lose
    // the knowledge that padding exists for the rest of the pipeline.
945     const DataLayout &DL = IC.getDataLayout();
946     auto *SL = DL.getStructLayout(ST);
947     if (SL->hasPadding())
948       return false;
949 
950     SmallString<16> EltName = V->getName();
951     EltName += ".elt";
952     auto *Addr = SI.getPointerOperand();
953     SmallString<16> AddrName = Addr->getName();
954     AddrName += ".repack";
955     auto *IdxType = Type::getInt32Ty(ST->getContext());
956     auto *Zero = ConstantInt::get(IdxType, 0);
957     for (unsigned i = 0; i < Count; i++) {
958       Value *Indices[2] = {
959         Zero,
960         ConstantInt::get(IdxType, i),
961       };
      auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr,
                                                makeArrayRef(Indices), AddrName);
963       auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
964       IC.Builder->CreateStore(Val, Ptr);
965     }
966 
967     return true;
968   }
969 
970   if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array has only one element, we unpack it.
972     if (AT->getNumElements() == 1) {
973       V = IC.Builder->CreateExtractValue(V, 0);
974       combineStoreToNewValue(IC, SI, V);
975       return true;
976     }
977   }
978 
979   return false;
980 }
981 
982 /// equivalentAddressValues - Test if A and B will obviously have the same
983 /// value. This includes recognizing that %t0 and %t1 will have the same
984 /// value in code like this:
985 ///   %t0 = getelementptr \@a, 0, 3
986 ///   store i32 0, i32* %t0
987 ///   %t1 = getelementptr \@a, 0, 3
988 ///   %t2 = load i32* %t1
989 ///
990 static bool equivalentAddressValues(Value *A, Value *B) {
991   // Test if the values are trivially equivalent.
992   if (A == B) return true;
993 
  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
999   if (isa<BinaryOperator>(A) ||
1000       isa<CastInst>(A) ||
1001       isa<PHINode>(A) ||
1002       isa<GetElementPtrInst>(A))
1003     if (Instruction *BI = dyn_cast<Instruction>(B))
1004       if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1005         return true;
1006 
1007   // Otherwise they may not be equivalent.
1008   return false;
1009 }
1010 
1011 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
1012   Value *Val = SI.getOperand(0);
1013   Value *Ptr = SI.getOperand(1);
1014 
1015   // Try to canonicalize the stored type.
1016   if (combineStoreToValueType(*this, SI))
1017     return EraseInstFromFunction(SI);
1018 
1019   // Attempt to improve the alignment.
1020   unsigned KnownAlign = getOrEnforceKnownAlignment(
1021       Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
1022   unsigned StoreAlign = SI.getAlignment();
1023   unsigned EffectiveStoreAlign =
1024       StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
1025 
1026   if (KnownAlign > EffectiveStoreAlign)
1027     SI.setAlignment(KnownAlign);
1028   else if (StoreAlign == 0)
1029     SI.setAlignment(EffectiveStoreAlign);
1030 
1031   // Try to canonicalize the stored type.
1032   if (unpackStoreToAggregate(*this, SI))
1033     return EraseInstFromFunction(SI);
1034 
1035   // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }
1040 
1041   // Don't hack volatile/ordered stores.
1042   // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1043   if (!SI.isUnordered()) return nullptr;
1044 
1045   // If the RHS is an alloca with a single use, zapify the store, making the
1046   // alloca dead.
1047   if (Ptr->hasOneUse()) {
1048     if (isa<AllocaInst>(Ptr))
1049       return EraseInstFromFunction(SI);
1050     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1051       if (isa<AllocaInst>(GEP->getOperand(0))) {
1052         if (GEP->getOperand(0)->hasOneUse())
1053           return EraseInstFromFunction(SI);
1054       }
1055     }
1056   }
1057 
1058   // Do really simple DSE, to catch cases where there are several consecutive
1059   // stores to the same location, separated by a few arithmetic operations. This
1060   // situation often occurs with bitfield accesses.
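  // For example (illustrative):
  //   store i32 %a, i32* %p
  //   %x = add i32 %b, 1
  //   store i32 %x, i32* %p
  // Here the first store is dead and is removed.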
1061   BasicBlock::iterator BBI(SI);
1062   for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1063        --ScanInsts) {
1064     --BBI;
    // Don't count debug info directives, lest they affect codegen, and skip
    // pointer-to-pointer bitcasts, which are NOPs.
1067     if (isa<DbgInfoIntrinsic>(BBI) ||
1068         (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1069       ScanInsts++;
1070       continue;
1071     }
1072 
1073     if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Is the previous store unordered and to the same location?
      if (PrevSI->isUnordered() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
1077         ++NumDeadStore;
1078         ++BBI;
1079         EraseInstFromFunction(*PrevSI);
1080         continue;
1081       }
1082       break;
1083     }
1084 
    // If this is a load, we have to stop.  However, if the loaded value is the
    // value we're storing and it was loaded from the pointer we're storing to,
    // then *this* store is dead (X = load P; store X -> P).
1088     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1089       if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1090         assert(SI.isUnordered() && "can't eliminate ordering operation");
1091         return EraseInstFromFunction(SI);
1092       }
1093 
1094       // Otherwise, this is a load from some other location.  Stores before it
1095       // may not be dead.
1096       break;
1097     }
1098 
1099     // Don't skip over loads or things that can modify memory.
1100     if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
1101       break;
1102   }
1103 
1104   // store X, null    -> turns into 'unreachable' in SimplifyCFG
1105   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
1106     if (!isa<UndefValue>(Val)) {
1107       SI.setOperand(0, UndefValue::get(Val->getType()));
1108       if (Instruction *U = dyn_cast<Instruction>(Val))
1109         Worklist.Add(U);  // Dropped a use.
1110     }
1111     return nullptr;  // Do not modify these!
1112   }
1113 
1114   // store undef, Ptr -> noop
1115   if (isa<UndefValue>(Val))
1116     return EraseInstFromFunction(SI);
1117 
1118   // The code below needs to be audited and adjusted for unordered atomics
1119   if (!SI.isSimple())
1120     return nullptr;
1121 
1122   // If this store is the last instruction in the basic block (possibly
1123   // excepting debug info instructions), and if the block ends with an
1124   // unconditional branch, try to move it to the successor block.
1125   BBI = SI.getIterator();
1126   do {
1127     ++BBI;
1128   } while (isa<DbgInfoIntrinsic>(BBI) ||
1129            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
1130   if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
1131     if (BI->isUnconditional())
1132       if (SimplifyStoreAtEndOfBlock(SI))
1133         return nullptr;  // xform done!
1134 
1135   return nullptr;
1136 }
1137 
1138 /// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2; }
1140 /// into a phi node with a store in the successor.
1141 ///
1142 /// Simplify things like:
1143 ///   *P = v1; if () { *P = v2; }
1144 /// into a phi node with a store in the successor.
1145 ///
1146 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
1147   BasicBlock *StoreBB = SI.getParent();
1148 
  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
1152   BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1153 
1154   // Determine whether Dest has exactly two predecessors and, if so, compute
1155   // the other predecessor.
1156   pred_iterator PI = pred_begin(DestBB);
1157   BasicBlock *P = *PI;
1158   BasicBlock *OtherBB = nullptr;
1159 
1160   if (P != StoreBB)
1161     OtherBB = P;
1162 
1163   if (++PI == pred_end(DestBB))
1164     return false;
1165 
1166   P = *PI;
1167   if (P != StoreBB) {
1168     if (OtherBB)
1169       return false;
1170     OtherBB = P;
1171   }
1172   if (++PI != pred_end(DestBB))
1173     return false;
1174 
1175   // Bail out if all the relevant blocks aren't distinct (this can happen,
1176   // for example, if SI is in an infinite loop)
1177   if (StoreBB == DestBB || OtherBB == DestBB)
1178     return false;
1179 
1180   // Verify that the other block ends in a branch and is not otherwise empty.
1181   BasicBlock::iterator BBI(OtherBB->getTerminator());
1182   BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
1183   if (!OtherBr || BBI == OtherBB->begin())
1184     return false;
1185 
  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
1188   StoreInst *OtherStore = nullptr;
1189   if (OtherBr->isUnconditional()) {
1190     --BBI;
1191     // Skip over debugging info.
1192     while (isa<DbgInfoIntrinsic>(BBI) ||
1193            (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
1194       if (BBI==OtherBB->begin())
1195         return false;
1196       --BBI;
1197     }
1198     // If this isn't a store, isn't a store to the same location, or is not the
1199     // right kind of store, bail out.
1200     OtherStore = dyn_cast<StoreInst>(BBI);
1201     if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
1202         !SI.isSameOperationAs(OtherStore))
1203       return false;
1204   } else {
1205     // Otherwise, the other block ended with a conditional branch. If one of the
1206     // destinations is StoreBB, then we have the if/then case.
1207     if (OtherBr->getSuccessor(0) != StoreBB &&
1208         OtherBr->getSuccessor(1) != StoreBB)
1209       return false;
1210 
1211     // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1212     // if/then triangle.  See if there is a store to the same ptr as SI that
1213     // lives in OtherBB.
1214     for (;; --BBI) {
1215       // Check to see if we find the matching store.
1216       if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
1217         if (OtherStore->getOperand(1) != SI.getOperand(1) ||
1218             !SI.isSameOperationAs(OtherStore))
1219           return false;
1220         break;
1221       }
1222       // If we find something that may be using or overwriting the stored
1223       // value, or if we run out of instructions, we can't do the xform.
1224       if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
1225           BBI == OtherBB->begin())
1226         return false;
1227     }
1228 
1229     // In order to eliminate the store in OtherBr, we have to
1230     // make sure nothing reads or overwrites the stored value in
1231     // StoreBB.
1232     for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1233       // FIXME: This should really be AA driven.
1234       if (I->mayReadFromMemory() || I->mayWriteToMemory())
1235         return false;
1236     }
1237   }
1238 
1239   // Insert a PHI node now if we need it.
1240   Value *MergedVal = OtherStore->getOperand(0);
1241   if (MergedVal != SI.getOperand(0)) {
1242     PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
1243     PN->addIncoming(SI.getOperand(0), SI.getParent());
1244     PN->addIncoming(OtherStore->getOperand(0), OtherBB);
1245     MergedVal = InsertNewInstBefore(PN, DestBB->front());
1246   }
1247 
1248   // Advance to a place where it is safe to insert the new store and
1249   // insert it.
1250   BBI = DestBB->getFirstInsertionPt();
1251   StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
1252                                    SI.isVolatile(),
1253                                    SI.getAlignment(),
1254                                    SI.getOrdering(),
1255                                    SI.getSynchScope());
1256   InsertNewInstBefore(NewSI, *BBI);
1257   NewSI->setDebugLoc(OtherStore->getDebugLoc());
1258 
1259   // If the two stores had AA tags, merge them.
1260   AAMDNodes AATags;
1261   SI.getAAMetadata(AATags);
1262   if (AATags) {
1263     OtherStore->getAAMetadata(AATags, /* Merge = */ true);
1264     NewSI->setAAMetadata(AATags);
1265   }
1266 
1267   // Nuke the old stores.
1268   EraseInstFromFunction(SI);
1269   EraseInstFromFunction(*OtherStore);
1270   return true;
1271 }
1272