1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// This family of functions performs various local transformations to the
// program.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Utils/Local.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/DenseMapInfo.h"
19 #include "llvm/ADT/DenseSet.h"
20 #include "llvm/ADT/Hashing.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/Optional.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SetVector.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/ADT/TinyPtrVector.h"
29 #include "llvm/Analysis/ConstantFolding.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/InstructionSimplify.h"
32 #include "llvm/Analysis/LazyValueInfo.h"
33 #include "llvm/Analysis/MemoryBuiltins.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/Analysis/ValueTracking.h"
36 #include "llvm/BinaryFormat/Dwarf.h"
37 #include "llvm/IR/Argument.h"
38 #include "llvm/IR/Attributes.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/CFG.h"
41 #include "llvm/IR/CallSite.h"
42 #include "llvm/IR/Constant.h"
43 #include "llvm/IR/ConstantRange.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DIBuilder.h"
46 #include "llvm/IR/DataLayout.h"
47 #include "llvm/IR/DebugInfoMetadata.h"
48 #include "llvm/IR/DebugLoc.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/GetElementPtrTypeIterator.h"
53 #include "llvm/IR/GlobalObject.h"
54 #include "llvm/IR/IRBuilder.h"
55 #include "llvm/IR/InstrTypes.h"
56 #include "llvm/IR/Instruction.h"
57 #include "llvm/IR/Instructions.h"
58 #include "llvm/IR/IntrinsicInst.h"
59 #include "llvm/IR/Intrinsics.h"
60 #include "llvm/IR/LLVMContext.h"
61 #include "llvm/IR/MDBuilder.h"
62 #include "llvm/IR/Metadata.h"
63 #include "llvm/IR/Module.h"
64 #include "llvm/IR/Operator.h"
65 #include "llvm/IR/PatternMatch.h"
66 #include "llvm/IR/Type.h"
67 #include "llvm/IR/Use.h"
68 #include "llvm/IR/User.h"
69 #include "llvm/IR/Value.h"
70 #include "llvm/IR/ValueHandle.h"
71 #include "llvm/Support/Casting.h"
72 #include "llvm/Support/Debug.h"
73 #include "llvm/Support/ErrorHandling.h"
74 #include "llvm/Support/KnownBits.h"
75 #include "llvm/Support/raw_ostream.h"
76 #include <algorithm>
77 #include <cassert>
78 #include <climits>
79 #include <cstdint>
80 #include <iterator>
81 #include <map>
82 #include <utility>
83 
84 using namespace llvm;
85 using namespace llvm::PatternMatch;
86 
87 #define DEBUG_TYPE "local"
88 
89 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
90 
91 //===----------------------------------------------------------------------===//
92 //  Local constant propagation.
93 //
94 
95 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
96 /// constant value, convert it into an unconditional branch to the constant
97 /// destination.  This is a nontrivial operation because the successors of this
98 /// basic block must have their PHI nodes updated.
99 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses that this might make dead, if
/// DeleteDeadConditions is true.
102 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
103                                   const TargetLibraryInfo *TLI) {
104   TerminatorInst *T = BB->getTerminator();
105   IRBuilder<> Builder(T);
106 
  // Branch - See if we are conditionally jumping on a constant.
108   if (auto *BI = dyn_cast<BranchInst>(T)) {
109     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
110     BasicBlock *Dest1 = BI->getSuccessor(0);
111     BasicBlock *Dest2 = BI->getSuccessor(1);
112 
113     if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on a constant?
      // YES.  Change to an unconditional branch...
116       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
117       BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;
118 
      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
121       OldDest->removePredecessor(BB);
122 
123       // Replace the conditional branch with an unconditional one.
124       Builder.CreateBr(Destination);
125       BI->eraseFromParent();
126       return true;
127     }
128 
129     if (Dest2 == Dest1) {       // Conditional branch to same location?
130       // This branch matches something like this:
131       //     br bool %cond, label %Dest, label %Dest
132       // and changes it into:  br label %Dest
133 
134       // Let the basic block know that we are letting go of one copy of it.
135       assert(BI->getParent() && "Terminator not inserted in block!");
136       Dest1->removePredecessor(BI->getParent());
137 
138       // Replace the conditional branch with an unconditional one.
139       Builder.CreateBr(Dest1);
140       Value *Cond = BI->getCondition();
141       BI->eraseFromParent();
142       if (DeleteDeadConditions)
143         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
144       return true;
145     }
146     return false;
147   }
148 
149   if (auto *SI = dyn_cast<SwitchInst>(T)) {
150     // If we are switching on a constant, we can convert the switch to an
151     // unconditional branch.
152     auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
153     BasicBlock *DefaultDest = SI->getDefaultDest();
154     BasicBlock *TheOnlyDest = DefaultDest;
155 
156     // If the default is unreachable, ignore it when searching for TheOnlyDest.
157     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
158         SI->getNumCases() > 0) {
159       TheOnlyDest = SI->case_begin()->getCaseSuccessor();
160     }
161 
162     // Figure out which case it goes to.
163     for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
164       // Found case matching a constant operand?
165       if (i->getCaseValue() == CI) {
166         TheOnlyDest = i->getCaseSuccessor();
167         break;
168       }
169 
170       // Check to see if this branch is going to the same place as the default
171       // dest.  If so, eliminate it as an explicit compare.
172       if (i->getCaseSuccessor() == DefaultDest) {
173         MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
174         unsigned NCases = SI->getNumCases();
175         // Fold the case metadata into the default if there will be any branches
176         // left, unless the metadata doesn't match the switch.
177         if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
178           // Collect branch weights into a vector.
179           SmallVector<uint32_t, 8> Weights;
180           for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
181                ++MD_i) {
182             auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
183             Weights.push_back(CI->getValue().getZExtValue());
184           }
185           // Merge weight of this case to the default weight.
186           unsigned idx = i->getCaseIndex();
187           Weights[0] += Weights[idx+1];
188           // Remove weight for this case.
189           std::swap(Weights[idx+1], Weights.back());
190           Weights.pop_back();
191           SI->setMetadata(LLVMContext::MD_prof,
192                           MDBuilder(BB->getContext()).
193                           createBranchWeights(Weights));
194         }
195         // Remove this entry.
196         DefaultDest->removePredecessor(SI->getParent());
197         i = SI->removeCase(i);
198         e = SI->case_end();
199         continue;
200       }
201 
202       // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
204       // destinations.
205       if (i->getCaseSuccessor() != TheOnlyDest)
206         TheOnlyDest = nullptr;
207 
208       // Increment this iterator as we haven't removed the case.
209       ++i;
210     }
211 
212     if (CI && !TheOnlyDest) {
      // We are branching on a constant, but it matches none of the cases;
      // branch to the default successor.
215       TheOnlyDest = SI->getDefaultDest();
216     }
217 
218     // If we found a single destination that we can fold the switch into, do so
219     // now.
220     if (TheOnlyDest) {
221       // Insert the new branch.
222       Builder.CreateBr(TheOnlyDest);
223       BasicBlock *BB = SI->getParent();
224 
225       // Remove entries from PHI nodes which we no longer branch to...
226       for (BasicBlock *Succ : SI->successors()) {
        // Keep one edge to TheOnlyDest intact so its PHI entries survive.
228         if (Succ == TheOnlyDest)
229           TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
230         else
231           Succ->removePredecessor(BB);
232       }
233 
234       // Delete the old switch.
235       Value *Cond = SI->getCondition();
236       SI->eraseFromParent();
237       if (DeleteDeadConditions)
238         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
239       return true;
240     }
241 
242     if (SI->getNumCases() == 1) {
243       // Otherwise, we can fold this switch into a conditional branch
244       // instruction if it has only one non-default destination.
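      // For example, this turns:
      //     switch i32 %x, label %default [ i32 5, label %case5 ]
      // into:
      //     %cond = icmp eq i32 %x, 5
      //     br i1 %cond, label %case5, label %default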
245       auto FirstCase = *SI->case_begin();
246       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
247           FirstCase.getCaseValue(), "cond");
248 
249       // Insert the new branch.
250       BranchInst *NewBr = Builder.CreateCondBr(Cond,
251                                                FirstCase.getCaseSuccessor(),
252                                                SI->getDefaultDest());
253       MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
254       if (MD && MD->getNumOperands() == 3) {
255         ConstantInt *SICase =
256             mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
257         ConstantInt *SIDef =
258             mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
259         assert(SICase && SIDef);
260         // The TrueWeight should be the weight for the single case of SI.
261         NewBr->setMetadata(LLVMContext::MD_prof,
262                         MDBuilder(BB->getContext()).
263                         createBranchWeights(SICase->getValue().getZExtValue(),
264                                             SIDef->getValue().getZExtValue()));
265       }
266 
267       // Update make.implicit metadata to the newly-created conditional branch.
268       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
269       if (MakeImplicitMD)
270         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
271 
272       // Delete the old switch.
273       SI->eraseFromParent();
274       return true;
275     }
276     return false;
277   }
278 
279   if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
280     // indirectbr blockaddress(@F, @BB) -> br label @BB
281     if (auto *BA =
282           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
283       BasicBlock *TheOnlyDest = BA->getBasicBlock();
284       // Insert the new branch.
285       Builder.CreateBr(TheOnlyDest);
286 
287       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
288         if (IBI->getDestination(i) == TheOnlyDest)
289           TheOnlyDest = nullptr;
290         else
291           IBI->getDestination(i)->removePredecessor(IBI->getParent());
292       }
293       Value *Address = IBI->getAddress();
294       IBI->eraseFromParent();
295       if (DeleteDeadConditions)
296         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
297 
298       // If we didn't find our destination in the IBI successor list, then we
299       // have undefined behavior.  Replace the unconditional branch with an
300       // 'unreachable' instruction.
301       if (TheOnlyDest) {
302         BB->getTerminator()->eraseFromParent();
303         new UnreachableInst(BB->getContext(), BB);
304       }
305 
306       return true;
307     }
308   }
309 
310   return false;
311 }
312 
313 //===----------------------------------------------------------------------===//
314 //  Local dead code elimination.
315 //
316 
317 /// isInstructionTriviallyDead - Return true if the result produced by the
318 /// instruction is not used, and the instruction has no side effects.
319 ///
320 bool llvm::isInstructionTriviallyDead(Instruction *I,
321                                       const TargetLibraryInfo *TLI) {
322   if (!I->use_empty())
323     return false;
324   return wouldInstructionBeTriviallyDead(I, TLI);
325 }
326 
327 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
328                                            const TargetLibraryInfo *TLI) {
329   if (isa<TerminatorInst>(I))
330     return false;
331 
332   // We don't want the landingpad-like instructions removed by anything this
333   // general.
334   if (I->isEHPad())
335     return false;
336 
337   // We don't want debug info removed by anything this general, unless
338   // debug info is empty.
339   if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
340     if (DDI->getAddress())
341       return false;
342     return true;
343   }
344   if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
345     if (DVI->getValue())
346       return false;
347     return true;
348   }
349 
350   if (!I->mayHaveSideEffects())
351     return true;
352 
353   // Special case intrinsics that "may have side effects" but can be deleted
354   // when dead.
355   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
356     // Safe to delete llvm.stacksave if dead.
357     if (II->getIntrinsicID() == Intrinsic::stacksave)
358       return true;
359 
    // Lifetime intrinsics are dead when their pointer operand is undef.
361     if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
362         II->getIntrinsicID() == Intrinsic::lifetime_end)
363       return isa<UndefValue>(II->getArgOperand(1));
364 
365     // Assumptions are dead if their condition is trivially true.  Guards on
366     // true are operationally no-ops.  In the future we can consider more
367     // sophisticated tradeoffs for guards considering potential for check
368     // widening, but for now we keep things simple.
369     if (II->getIntrinsicID() == Intrinsic::assume ||
370         II->getIntrinsicID() == Intrinsic::experimental_guard) {
371       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
372         return !Cond->isZero();
373 
374       return false;
375     }
376   }
377 
378   if (isAllocLikeFn(I, TLI))
379     return true;
380 
381   if (CallInst *CI = isFreeCall(I, TLI))
382     if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
383       return C->isNullValue() || isa<UndefValue>(C);
384 
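  // Calls to recognized math library functions are trivially dead when their
  // result is unused and the call is known to be free of side effects (e.g.
  // it cannot set errno for its arguments).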
385   if (CallSite CS = CallSite(I))
386     if (isMathLibCallNoop(CS, TLI))
387       return true;
388 
389   return false;
390 }
391 
392 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
393 /// trivially dead instruction, delete it.  If that makes any of its operands
394 /// trivially dead, delete them too, recursively.  Return true if any
395 /// instructions were deleted.
396 bool
397 llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
398                                                  const TargetLibraryInfo *TLI) {
399   Instruction *I = dyn_cast<Instruction>(V);
400   if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
401     return false;
402 
403   SmallVector<Instruction*, 16> DeadInsts;
404   DeadInsts.push_back(I);
405 
406   do {
407     I = DeadInsts.pop_back_val();
408 
409     // Null out all of the instruction's operands to see if any operand becomes
410     // dead as we go.
411     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
412       Value *OpV = I->getOperand(i);
413       I->setOperand(i, nullptr);
414 
415       if (!OpV->use_empty()) continue;
416 
417       // If the operand is an instruction that became dead as we nulled out the
418       // operand, and if it is 'trivially' dead, delete it in a future loop
419       // iteration.
420       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
421         if (isInstructionTriviallyDead(OpI, TLI))
422           DeadInsts.push_back(OpI);
423     }
424 
425     I->eraseFromParent();
426   } while (!DeadInsts.empty());
427 
428   return true;
429 }
430 
/// areAllUsesEqual - Check whether the users of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no users, or when multiple uses all come from the
/// same user.
435 static bool areAllUsesEqual(Instruction *I) {
436   Value::user_iterator UI = I->user_begin();
437   Value::user_iterator UE = I->user_end();
438   if (UI == UE)
439     return true;
440 
441   User *TheUse = *UI;
442   for (++UI; UI != UE; ++UI) {
443     if (*UI != TheUse)
444       return false;
445   }
446   return true;
447 }
448 
449 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
450 /// dead PHI node, due to being a def-use chain of single-use nodes that
451 /// either forms a cycle or is terminated by a trivially dead instruction,
452 /// delete it.  If that makes any of its operands trivially dead, delete them
453 /// too, recursively.  Return true if a change was made.
454 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
455                                         const TargetLibraryInfo *TLI) {
456   SmallPtrSet<Instruction*, 4> Visited;
457   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
458        I = cast<Instruction>(*I->user_begin())) {
459     if (I->use_empty())
460       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
461 
462     // If we find an instruction more than once, we're on a cycle that
463     // won't prove fruitful.
464     if (!Visited.insert(I).second) {
465       // Break the cycle and delete the instruction and its operands.
466       I->replaceAllUsesWith(UndefValue::get(I->getType()));
467       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
468       return true;
469     }
470   }
471   return false;
472 }
473 
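/// Erase the given instruction if it is trivially dead, or replace it with
/// the result of instruction simplification if one exists.  Operands that
/// become dead and users that may now be simplifiable are added to WorkList.
/// Returns true if any change was made.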
474 static bool
475 simplifyAndDCEInstruction(Instruction *I,
476                           SmallSetVector<Instruction *, 16> &WorkList,
477                           const DataLayout &DL,
478                           const TargetLibraryInfo *TLI) {
479   if (isInstructionTriviallyDead(I, TLI)) {
480     // Null out all of the instruction's operands to see if any operand becomes
481     // dead as we go.
482     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
483       Value *OpV = I->getOperand(i);
484       I->setOperand(i, nullptr);
485 
486       if (!OpV->use_empty() || I == OpV)
487         continue;
488 
489       // If the operand is an instruction that became dead as we nulled out the
490       // operand, and if it is 'trivially' dead, delete it in a future loop
491       // iteration.
492       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
493         if (isInstructionTriviallyDead(OpI, TLI))
494           WorkList.insert(OpI);
495     }
496 
497     I->eraseFromParent();
498 
499     return true;
500   }
501 
502   if (Value *SimpleV = SimplifyInstruction(I, DL)) {
503     // Add the users to the worklist. CAREFUL: an instruction can use itself,
504     // in the case of a phi node.
505     for (User *U : I->users()) {
506       if (U != I) {
507         WorkList.insert(cast<Instruction>(U));
508       }
509     }
510 
511     // Replace the instruction with its simplified value.
512     bool Changed = false;
513     if (!I->use_empty()) {
514       I->replaceAllUsesWith(SimpleV);
515       Changed = true;
516     }
517     if (isInstructionTriviallyDead(I, TLI)) {
518       I->eraseFromParent();
519       Changed = true;
520     }
521     return Changed;
522   }
523   return false;
524 }
525 
526 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
527 /// simplify any instructions in it and recursively delete dead instructions.
528 ///
/// This returns true if it changed the code.  Note that it can delete
/// instructions in other blocks as well as in this block.
531 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
532                                        const TargetLibraryInfo *TLI) {
533   bool MadeChange = false;
534   const DataLayout &DL = BB->getModule()->getDataLayout();
535 
536 #ifndef NDEBUG
537   // In debug builds, ensure that the terminator of the block is never replaced
538   // or deleted by these simplifications. The idea of simplification is that it
539   // cannot introduce new instructions, and there is no way to replace the
540   // terminator of a block without introducing a new instruction.
541   AssertingVH<Instruction> TerminatorVH(&BB->back());
542 #endif
543 
544   SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the instructions in the block, only adding insts to the
  // worklist if they actually need to be revisited. This avoids having to
  // pre-init the worklist with the entire block's worth of instructions.
548   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
549        BI != E;) {
550     assert(!BI->isTerminator());
551     Instruction *I = &*BI;
552     ++BI;
553 
554     // We're visiting this instruction now, so make sure it's not in the
555     // worklist from an earlier visit.
556     if (!WorkList.count(I))
557       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
558   }
559 
560   while (!WorkList.empty()) {
561     Instruction *I = WorkList.pop_back_val();
562     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
563   }
564   return MadeChange;
565 }
566 
567 //===----------------------------------------------------------------------===//
568 //  Control Flow Graph Restructuring.
569 //
570 
571 /// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
572 /// method is called when we're about to delete Pred as a predecessor of BB.  If
573 /// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
574 ///
575 /// Unlike the removePredecessor method, this attempts to simplify uses of PHI
576 /// nodes that collapse into identity values.  For example, if we have:
577 ///   x = phi(1, 0, 0, 0)
578 ///   y = and x, z
579 ///
580 /// .. and delete the predecessor corresponding to the '1', this will attempt to
581 /// recursively fold the and to 0.
582 void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
583   // This only adjusts blocks with PHI nodes.
584   if (!isa<PHINode>(BB->begin()))
585     return;
586 
587   // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
588   // them down.  This will leave us with single entry phi nodes and other phis
589   // that can be removed.
590   BB->removePredecessor(Pred, true);
591 
592   WeakTrackingVH PhiIt = &BB->front();
593   while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
594     PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
595     Value *OldPhiIt = PhiIt;
596 
597     if (!recursivelySimplifyInstruction(PN))
598       continue;
599 
600     // If recursive simplification ended up deleting the next PHI node we would
601     // iterate to, then our iterator is invalid, restart scanning from the top
602     // of the block.
603     if (PhiIt != OldPhiIt) PhiIt = &BB->front();
604   }
605 }
606 
607 /// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
608 /// predecessor is known to have one successor (DestBB!).  Eliminate the edge
609 /// between them, moving the instructions in the predecessor into DestBB and
610 /// deleting the predecessor block.
611 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT) {
612   // If BB has single-entry PHI nodes, fold them.
613   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
614     Value *NewVal = PN->getIncomingValue(0);
    // Replace a self-referencing PHI with undef; it must be dead.
616     if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
617     PN->replaceAllUsesWith(NewVal);
618     PN->eraseFromParent();
619   }
620 
621   BasicBlock *PredBB = DestBB->getSinglePredecessor();
622   assert(PredBB && "Block doesn't have a single predecessor!");
623 
624   // Zap anything that took the address of DestBB.  Not doing this will give the
625   // address an invalid value.
626   if (DestBB->hasAddressTaken()) {
627     BlockAddress *BA = BlockAddress::get(DestBB);
628     Constant *Replacement =
629       ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
630     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
631                                                      BA->getType()));
632     BA->destroyConstant();
633   }
634 
635   // Anything that branched to PredBB now branches to DestBB.
636   PredBB->replaceAllUsesWith(DestBB);
637 
638   // Splice all the instructions from PredBB to DestBB.
639   PredBB->getTerminator()->eraseFromParent();
640   DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
641 
642   // If the PredBB is the entry block of the function, move DestBB up to
643   // become the entry block after we erase PredBB.
644   if (PredBB == &DestBB->getParent()->getEntryBlock())
645     DestBB->moveAfter(PredBB);
646 
647   if (DT) {
648     // For some irreducible CFG we end up having forward-unreachable blocks
649     // so check if getNode returns a valid node before updating the domtree.
650     if (DomTreeNode *DTN = DT->getNode(PredBB)) {
651       BasicBlock *PredBBIDom = DTN->getIDom()->getBlock();
652       DT->changeImmediateDominator(DestBB, PredBBIDom);
653       DT->eraseNode(PredBB);
654     }
655   }
656   // Nuke BB.
657   PredBB->eraseFromParent();
658 }
659 
660 /// CanMergeValues - Return true if we can choose one of these values to use
661 /// in place of the other. Note that we will always choose the non-undef
662 /// value to keep.
663 static bool CanMergeValues(Value *First, Value *Second) {
664   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
665 }
666 
667 /// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
668 /// almost-empty BB ending in an unconditional branch to Succ, into Succ.
669 ///
670 /// Assumption: Succ is the single successor for BB.
671 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
672   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
673 
674   DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
675         << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB, and
  // merging is always safe.
678   if (Succ->getSinglePredecessor()) return true;
679 
680   // Make a list of the predecessors of BB
681   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
682 
683   // Look at all the phi nodes in Succ, to see if they present a conflict when
684   // merging these blocks
685   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
686     PHINode *PN = cast<PHINode>(I);
687 
688     // If the incoming value from BB is again a PHINode in
689     // BB which has the same incoming value for *PI as PN does, we can
690     // merge the phi nodes and then the blocks can still be merged
691     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
692     if (BBPN && BBPN->getParent() == BB) {
693       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
694         BasicBlock *IBB = PN->getIncomingBlock(PI);
695         if (BBPreds.count(IBB) &&
696             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
697                             PN->getIncomingValue(PI))) {
698           DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
699                 << Succ->getName() << " is conflicting with "
700                 << BBPN->getName() << " with regard to common predecessor "
701                 << IBB->getName() << "\n");
702           return false;
703         }
704       }
705     } else {
706       Value* Val = PN->getIncomingValueForBlock(BB);
707       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
708         // See if the incoming value for the common predecessor is equal to the
709         // one for BB, in which case this phi node will not prevent the merging
710         // of the block.
711         BasicBlock *IBB = PN->getIncomingBlock(PI);
712         if (BBPreds.count(IBB) &&
713             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
714           DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
715                 << Succ->getName() << " is conflicting with regard to common "
716                 << "predecessor " << IBB->getName() << "\n");
717           return false;
718         }
719       }
720     }
721   }
722 
723   return true;
724 }
725 
726 using PredBlockVector = SmallVector<BasicBlock *, 16>;
727 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
728 
729 /// \brief Determines the value to use as the phi node input for a block.
730 ///
/// Select between \p OldVal and any value that we know flows from \p BB
732 /// to a particular phi on the basis of which one (if either) is not
733 /// undef. Update IncomingValues based on the selected value.
734 ///
735 /// \param OldVal The value we are considering selecting.
736 /// \param BB The block that the value flows in from.
737 /// \param IncomingValues A map from block-to-value for other phi inputs
738 /// that we have examined.
739 ///
740 /// \returns the selected value.
741 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
742                                           IncomingValueMap &IncomingValues) {
743   if (!isa<UndefValue>(OldVal)) {
744     assert((!IncomingValues.count(BB) ||
745             IncomingValues.find(BB)->second == OldVal) &&
746            "Expected OldVal to match incoming value from BB!");
747 
748     IncomingValues.insert(std::make_pair(BB, OldVal));
749     return OldVal;
750   }
751 
752   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
753   if (It != IncomingValues.end()) return It->second;
754 
755   return OldVal;
756 }
757 
758 /// \brief Create a map from block to value for the operands of a
759 /// given phi.
760 ///
761 /// Create a map from block to value for each non-undef value flowing
762 /// into \p PN.
763 ///
764 /// \param PN The phi we are collecting the map for.
765 /// \param IncomingValues [out] The map from block to value for this phi.
766 static void gatherIncomingValuesToPhi(PHINode *PN,
767                                       IncomingValueMap &IncomingValues) {
768   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
769     BasicBlock *BB = PN->getIncomingBlock(i);
770     Value *V = PN->getIncomingValue(i);
771 
772     if (!isa<UndefValue>(V))
773       IncomingValues.insert(std::make_pair(BB, V));
774   }
775 }
776 
777 /// \brief Replace the incoming undef values to a phi with the values
778 /// from a block-to-value map.
779 ///
780 /// \param PN The phi we are replacing the undefs in.
781 /// \param IncomingValues A map from block to value.
782 static void replaceUndefValuesInPhi(PHINode *PN,
783                                     const IncomingValueMap &IncomingValues) {
784   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
785     Value *V = PN->getIncomingValue(i);
786 
787     if (!isa<UndefValue>(V)) continue;
788 
789     BasicBlock *BB = PN->getIncomingBlock(i);
790     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
791     if (It == IncomingValues.end()) continue;
792 
793     PN->setIncomingValue(i, It->second);
794   }
795 }
796 
797 /// \brief Replace a value flowing from a block to a phi with
798 /// potentially multiple instances of that value flowing from the
799 /// block's predecessors to the phi.
800 ///
801 /// \param BB The block with the value flowing into the phi.
802 /// \param BBPreds The predecessors of BB.
803 /// \param PN The phi that we are updating.
804 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
805                                                 const PredBlockVector &BBPreds,
806                                                 PHINode *PN) {
807   Value *OldVal = PN->removeIncomingValue(BB, false);
808   assert(OldVal && "No entry in PHI for Pred BB!");
809 
810   IncomingValueMap IncomingValues;
811 
812   // We are merging two blocks - BB, and the block containing PN - and
813   // as a result we need to redirect edges from the predecessors of BB
814   // to go to the block containing PN, and update PN
815   // accordingly. Since we allow merging blocks in the case where the
816   // predecessor and successor blocks both share some predecessors,
817   // and where some of those common predecessors might have undef
818   // values flowing into PN, we want to rewrite those values to be
819   // consistent with the non-undef values.
820 
821   gatherIncomingValuesToPhi(PN, IncomingValues);
822 
823   // If this incoming value is one of the PHI nodes in BB, the new entries
824   // in the PHI node are the entries from the old PHI.
825   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
826     PHINode *OldValPN = cast<PHINode>(OldVal);
827     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
828       // Note that, since we are merging phi nodes and BB and Succ might
829       // have common predecessors, we could end up with a phi node with
830       // identical incoming branches. This will be cleaned up later (and
831       // will trigger asserts if we try to clean it up now, without also
832       // simplifying the corresponding conditional branch).
833       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
834       Value *PredVal = OldValPN->getIncomingValue(i);
835       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
836                                                     IncomingValues);
837 
838       // And add a new incoming value for this predecessor for the
839       // newly retargeted branch.
840       PN->addIncoming(Selected, PredBB);
841     }
842   } else {
843     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
844       // Update existing incoming values in PN for this
845       // predecessor of BB.
846       BasicBlock *PredBB = BBPreds[i];
847       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
848                                                     IncomingValues);
849 
850       // And add a new incoming value for this predecessor for the
851       // newly retargeted branch.
852       PN->addIncoming(Selected, PredBB);
853     }
854   }
855 
856   replaceUndefValuesInPhi(PN, IncomingValues);
857 }
858 
859 /// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
860 /// unconditional branch, and contains no instructions other than PHI nodes,
/// potentially side-effect-free intrinsics, and the branch.  If possible,
862 /// eliminate BB by rewriting all the predecessors to branch to the successor
863 /// block and return true.  If we can't transform, return false.
864 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
865   assert(BB != &BB->getParent()->getEntryBlock() &&
866          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
867 
868   // We can't eliminate infinite loops.
869   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
870   if (BB == Succ) return false;
871 
872   // Check to see if merging these blocks would cause conflicts for any of the
873   // phi nodes in BB or Succ. If not, we can safely merge.
874   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
875 
876   // Check for cases where Succ has multiple predecessors and a PHI node in BB
877   // has uses which will not disappear when the PHI nodes are merged.  It is
878   // possible to handle such cases, but difficult: it requires checking whether
879   // BB dominates Succ, which is non-trivial to calculate in the case where
880   // Succ has multiple predecessors.  Also, it requires checking whether
881   // constructing the necessary self-referential PHI node doesn't introduce any
882   // conflicts; this isn't too difficult, but the previous code for doing this
883   // was incorrect.
884   //
885   // Note that if this check finds a live use, BB dominates Succ, so BB is
886   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
887   // folding the branch isn't profitable in that case anyway.
888   if (!Succ->getSinglePredecessor()) {
889     BasicBlock::iterator BBI = BB->begin();
890     while (isa<PHINode>(*BBI)) {
891       for (Use &U : BBI->uses()) {
892         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
893           if (PN->getIncomingBlock(U) != BB)
894             return false;
895         } else {
896           return false;
897         }
898       }
899       ++BBI;
900     }
901   }
902 
903   DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
904 
905   if (isa<PHINode>(Succ->begin())) {
906     // If there is more than one pred of succ, and there are PHI nodes in
907     // the successor, then we need to add incoming edges for the PHI nodes
908     //
909     const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
910 
911     // Loop over all of the PHI nodes in the successor of BB.
912     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
913       PHINode *PN = cast<PHINode>(I);
914 
915       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
916     }
917   }
918 
919   if (Succ->getSinglePredecessor()) {
920     // BB is the only predecessor of Succ, so Succ will end up with exactly
921     // the same predecessors BB had.
922 
923     // Copy over any phi, debug or lifetime instruction.
924     BB->getTerminator()->eraseFromParent();
925     Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
926                                BB->getInstList());
927   } else {
928     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
929       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
930       assert(PN->use_empty() && "There shouldn't be any uses here!");
931       PN->eraseFromParent();
932     }
933   }
934 
935   // If the unconditional branch we replaced contains llvm.loop metadata, we
936   // add the metadata to the branch instructions in the predecessors.
937   unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
938   Instruction *TI = BB->getTerminator();
939   if (TI)
940     if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
941       for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
942         BasicBlock *Pred = *PI;
943         Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
944       }
945 
946   // Everything that jumped to BB now goes to Succ.
947   BB->replaceAllUsesWith(Succ);
948   if (!Succ->hasName()) Succ->takeName(BB);
949   BB->eraseFromParent();              // Delete the old basic block.
950   return true;
951 }
952 
953 /// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
954 /// nodes in this block. This doesn't try to be clever about PHI nodes
955 /// which differ only in the order of the incoming values, but instcombine
956 /// orders them so it usually won't matter.
957 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
958   // This implementation doesn't currently consider undef operands
959   // specially. Theoretically, two phis which are identical except for
960   // one having an undef where the other doesn't could be collapsed.
961 
962   struct PHIDenseMapInfo {
963     static PHINode *getEmptyKey() {
964       return DenseMapInfo<PHINode *>::getEmptyKey();
965     }
966 
967     static PHINode *getTombstoneKey() {
968       return DenseMapInfo<PHINode *>::getTombstoneKey();
969     }
970 
971     static unsigned getHashValue(PHINode *PN) {
972       // Compute a hash value on the operands. Instcombine will likely have
973       // sorted them, which helps expose duplicates, but we have to check all
974       // the operands to be safe in case instcombine hasn't run.
975       return static_cast<unsigned>(hash_combine(
976           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
977           hash_combine_range(PN->block_begin(), PN->block_end())));
978     }
979 
980     static bool isEqual(PHINode *LHS, PHINode *RHS) {
981       if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
982           RHS == getEmptyKey() || RHS == getTombstoneKey())
983         return LHS == RHS;
984       return LHS->isIdenticalTo(RHS);
985     }
986   };
987 
988   // Set of unique PHINodes.
989   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
990 
991   // Examine each PHI.
992   bool Changed = false;
993   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
994     auto Inserted = PHISet.insert(PN);
995     if (!Inserted.second) {
996       // A duplicate. Replace this PHI with its duplicate.
997       PN->replaceAllUsesWith(*Inserted.first);
998       PN->eraseFromParent();
999       Changed = true;
1000 
1001       // The RAUW can change PHIs that we already visited. Start over from the
1002       // beginning.
1003       PHISet.clear();
1004       I = BB->begin();
1005     }
1006   }
1007 
1008   return Changed;
1009 }
1010 
1011 /// enforceKnownAlignment - If the specified pointer points to an object that
1012 /// we control, modify the object's alignment to PrefAlign. This isn't
1013 /// often possible though. If alignment is important, a more reliable approach
1014 /// is to simply align all global variables and allocation instructions to
1015 /// their preferred alignment from the beginning.
1016 static unsigned enforceKnownAlignment(Value *V, unsigned Align,
1017                                       unsigned PrefAlign,
1018                                       const DataLayout &DL) {
1019   assert(PrefAlign > Align);
1020 
1021   V = V->stripPointerCasts();
1022 
1023   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1024     // TODO: ideally, computeKnownBits ought to have used
1025     // AllocaInst::getAlignment() in its computation already, making
1026     // the below max redundant. But, as it turns out,
1027     // stripPointerCasts recurses through infinite layers of bitcasts,
1028     // while computeKnownBits is not allowed to traverse more than 6
1029     // levels.
1030     Align = std::max(AI->getAlignment(), Align);
1031     if (PrefAlign <= Align)
1032       return Align;
1033 
1034     // If the preferred alignment is greater than the natural stack alignment
1035     // then don't round up. This avoids dynamic stack realignment.
1036     if (DL.exceedsNaturalStackAlignment(PrefAlign))
1037       return Align;
1038     AI->setAlignment(PrefAlign);
1039     return PrefAlign;
1040   }
1041 
1042   if (auto *GO = dyn_cast<GlobalObject>(V)) {
1043     // TODO: as above, this shouldn't be necessary.
1044     Align = std::max(GO->getAlignment(), Align);
1045     if (PrefAlign <= Align)
1046       return Align;
1047 
1048     // If there is a large requested alignment and we can, bump up the alignment
1049     // of the global.  If the memory we set aside for the global may not be the
1050     // memory used by the final program then it is impossible for us to reliably
1051     // enforce the preferred alignment.
1052     if (!GO->canIncreaseAlignment())
1053       return Align;
1054 
1055     GO->setAlignment(PrefAlign);
1056     return PrefAlign;
1057   }
1058 
1059   return Align;
1060 }
1061 
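/// getOrEnforceKnownAlignment - Compute the alignment that is known to hold
/// for the pointer V and, if it is smaller than PrefAlign, try to raise the
/// alignment of the underlying alloca or global.  Returns the best alignment
/// that could be established.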
1062 unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
1063                                           const DataLayout &DL,
1064                                           const Instruction *CxtI,
1065                                           AssumptionCache *AC,
1066                                           const DominatorTree *DT) {
1067   assert(V->getType()->isPointerTy() &&
1068          "getOrEnforceKnownAlignment expects a pointer!");
1069 
1070   KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1071   unsigned TrailZ = Known.countMinTrailingZeros();
1072 
1073   // Avoid trouble with ridiculously large TrailZ values, such as
1074   // those computed from a null pointer.
1075   TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));
1076 
1077   unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);
1078 
1079   // LLVM doesn't support alignments larger than this currently.
1080   Align = std::min(Align, +Value::MaximumAlignment);
1081 
1082   if (PrefAlign > Align)
1083     Align = enforceKnownAlignment(V, Align, PrefAlign, DL);
1084 
  // Return the alignment we were able to determine or enforce.
  return Align;
1087 }
1088 
1089 ///===---------------------------------------------------------------------===//
1090 ///  Dbg Intrinsic utilities
1091 ///
1092 
1093 /// See if there is a dbg.value intrinsic for DIVar before I.
1094 static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
1095                               Instruction *I) {
  // Since we can't guarantee that the original dbg.declare intrinsic
1097   // is removed by LowerDbgDeclare(), we need to make sure that we are
1098   // not inserting the same dbg.value intrinsic over and over.
1099   BasicBlock::InstListType::iterator PrevI(I);
1100   if (PrevI != I->getParent()->getInstList().begin()) {
1101     --PrevI;
1102     if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
1103       if (DVI->getValue() == I->getOperand(0) &&
1104           DVI->getVariable() == DIVar &&
1105           DVI->getExpression() == DIExpr)
1106         return true;
1107   }
1108   return false;
1109 }
1110 
1111 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1112 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1113                              DIExpression *DIExpr,
1114                              PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
1116   // is removed by LowerDbgDeclare(), we need to make sure that we are
1117   // not inserting the same dbg.value intrinsic over and over.
1118   SmallVector<DbgValueInst *, 1> DbgValues;
1119   findDbgValues(DbgValues, APN);
1120   for (auto *DVI : DbgValues) {
1121     assert(DVI->getValue() == APN);
1122     if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1123       return true;
1124   }
1125   return false;
1126 }
1127 
/// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
1129 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1130 void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1131                                            StoreInst *SI, DIBuilder &Builder) {
1132   assert(DII->isAddressOfVariable());
1133   auto *DIVar = DII->getVariable();
1134   assert(DIVar && "Missing variable");
1135   auto *DIExpr = DII->getExpression();
1136   Value *DV = SI->getOperand(0);
1137 
  // If an argument is zero- or sign-extended then use the argument directly.
  // The extension may be zapped by an optimization pass in the future.
1140   Argument *ExtendedArg = nullptr;
1141   if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
1142     ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
1143   if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
1144     ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
1145   if (ExtendedArg) {
1146     // If this DII was already describing only a fragment of a variable, ensure
1147     // that fragment is appropriately narrowed here.
1148     // But if a fragment wasn't used, describe the value as the original
1149     // argument (rather than the zext or sext) so that it remains described even
1150     // if the sext/zext is optimized away. This widens the variable description,
1151     // leaving it up to the consumer to know how the smaller value may be
1152     // represented in a larger register.
1153     if (auto Fragment = DIExpr->getFragmentInfo()) {
1154       unsigned FragmentOffset = Fragment->OffsetInBits;
1155       SmallVector<uint64_t, 3> Ops(DIExpr->elements_begin(),
1156                                    DIExpr->elements_end() - 3);
1157       Ops.push_back(dwarf::DW_OP_LLVM_fragment);
1158       Ops.push_back(FragmentOffset);
1159       const DataLayout &DL = DII->getModule()->getDataLayout();
1160       Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
1161       DIExpr = Builder.createExpression(Ops);
1162     }
1163     DV = ExtendedArg;
1164   }
1165   if (!LdStHasDebugValue(DIVar, DIExpr, SI))
1166     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
1167                                     SI);
1168 }
1169 
/// Inserts an llvm.dbg.value intrinsic before a load of an alloca'd value
1171 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1172 void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1173                                            LoadInst *LI, DIBuilder &Builder) {
1174   auto *DIVar = DII->getVariable();
1175   auto *DIExpr = DII->getExpression();
1176   assert(DIVar && "Missing variable");
1177 
1178   if (LdStHasDebugValue(DIVar, DIExpr, LI))
1179     return;
1180 
1181   // We are now tracking the loaded value instead of the address. In the
1182   // future if multi-location support is added to the IR, it might be
1183   // preferable to keep tracking both the loaded value and the original
  // address, in case the alloca cannot be elided.
1185   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1186       LI, DIVar, DIExpr, DII->getDebugLoc(), (Instruction *)nullptr);
1187   DbgValue->insertAfter(LI);
1188 }
1189 
/// Inserts an llvm.dbg.value intrinsic after a phi that has an associated
1191 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1192 void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1193                                            PHINode *APN, DIBuilder &Builder) {
1194   auto *DIVar = DII->getVariable();
1195   auto *DIExpr = DII->getExpression();
1196   assert(DIVar && "Missing variable");
1197 
1198   if (PhiHasDebugValue(DIVar, DIExpr, APN))
1199     return;
1200 
1201   BasicBlock *BB = APN->getParent();
1202   auto InsertionPt = BB->getFirstInsertionPt();
1203 
1204   // The block may be a catchswitch block, which does not have a valid
1205   // insertion point.
1206   // FIXME: Insert dbg.value markers in the successors when appropriate.
1207   if (InsertionPt != BB->end())
1208     Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, DII->getDebugLoc(),
1209                                     &*InsertionPt);
1210 }
1211 
1212 /// Determine whether this alloca is either a VLA or an array.
1213 static bool isArray(AllocaInst *AI) {
1214   return AI->isArrayAllocation() ||
1215     AI->getType()->getElementType()->isArrayTy();
1216 }
1217 
1218 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1219 /// of llvm.dbg.value intrinsics.
1220 bool llvm::LowerDbgDeclare(Function &F) {
1221   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1222   SmallVector<DbgDeclareInst *, 4> Dbgs;
1223   for (auto &FI : F)
1224     for (Instruction &BI : FI)
1225       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1226         Dbgs.push_back(DDI);
1227 
1228   if (Dbgs.empty())
1229     return false;
1230 
1231   for (auto &I : Dbgs) {
1232     DbgDeclareInst *DDI = I;
1233     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1234     // If this is an alloca for a scalar variable, insert a dbg.value
1235     // at each load and store to the alloca and erase the dbg.declare.
1236     // The dbg.values allow tracking a variable even if it is not
1237     // stored on the stack, while the dbg.declare can only describe
1238     // the stack slot (and at a lexical-scope granularity). Later
1239     // passes will attempt to elide the stack slot.
1240     if (AI && !isArray(AI)) {
1241       for (auto &AIUse : AI->uses()) {
1242         User *U = AIUse.getUser();
1243         if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1244           if (AIUse.getOperandNo() == 1)
1245             ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1246         } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1247           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1248         } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1249           // This is a call by-value or some other instruction that
1250           // takes a pointer to the variable. Insert a *value*
1251           // intrinsic that describes the alloca.
1252           DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(),
1253                                       DDI->getExpression(), DDI->getDebugLoc(),
1254                                       CI);
1255         }
1256       }
1257       DDI->eraseFromParent();
1258     }
1259   }
1260   return true;
1261 }
1262 
1263 /// Finds all intrinsics declaring local variables as living in the memory that
1264 /// 'V' points to. This may include a mix of dbg.declare and
1265 /// dbg.addr intrinsics.
1266 TinyPtrVector<DbgInfoIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
1267   auto *L = LocalAsMetadata::getIfExists(V);
1268   if (!L)
1269     return {};
1270   auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
1271   if (!MDV)
1272     return {};
1273 
1274   TinyPtrVector<DbgInfoIntrinsic *> Declares;
1275   for (User *U : MDV->users()) {
1276     if (auto *DII = dyn_cast<DbgInfoIntrinsic>(U))
1277       if (DII->isAddressOfVariable())
1278         Declares.push_back(DII);
1279   }
1280 
1281   return Declares;
1282 }
1283 
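/// findDbgValues - Collect all llvm.dbg.value intrinsics describing the
/// value V into DbgValues.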
1284 void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
1285   if (auto *L = LocalAsMetadata::getIfExists(V))
1286     if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1287       for (User *U : MDV->users())
1288         if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
1289           DbgValues.push_back(DVI);
1290 }
1291 
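/// findDbgUsers - Collect all debug info intrinsics (dbg.declare, dbg.addr,
/// and dbg.value) that describe the value V into DbgUsers.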
1292 void llvm::findDbgUsers(SmallVectorImpl<DbgInfoIntrinsic *> &DbgUsers,
1293                         Value *V) {
1294   if (auto *L = LocalAsMetadata::getIfExists(V))
1295     if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1296       for (User *U : MDV->users())
1297         if (DbgInfoIntrinsic *DII = dyn_cast<DbgInfoIntrinsic>(U))
1298           DbgUsers.push_back(DII);
1299 }
1300 
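/// replaceDbgDeclare - Rewrite the dbg.declare and dbg.addr intrinsics that
/// describe Address so that they describe NewAddress instead, prepending the
/// requested dereference and offset operations to their expressions.  Returns
/// true if any such intrinsic was found and rewritten.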
1301 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1302                              Instruction *InsertBefore, DIBuilder &Builder,
1303                              bool DerefBefore, int Offset, bool DerefAfter) {
1304   auto DbgAddrs = FindDbgAddrUses(Address);
1305   for (DbgInfoIntrinsic *DII : DbgAddrs) {
1306     DebugLoc Loc = DII->getDebugLoc();
1307     auto *DIVar = DII->getVariable();
1308     auto *DIExpr = DII->getExpression();
1309     assert(DIVar && "Missing variable");
1310     DIExpr = DIExpression::prepend(DIExpr, DerefBefore, Offset, DerefAfter);
    // Insert llvm.dbg.declare immediately before InsertBefore, and remove old
    // llvm.dbg.declare.
1313     Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
1314     if (DII == InsertBefore)
1315       InsertBefore = &*std::next(InsertBefore->getIterator());
1316     DII->eraseFromParent();
1317   }
1318   return !DbgAddrs.empty();
1319 }
1320 
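/// replaceDbgDeclareForAlloca - Convenience wrapper around replaceDbgDeclare
/// for the common case of replacing an alloca; the rewritten intrinsics are
/// inserted immediately after AI.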
1321 bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1322                                       DIBuilder &Builder, bool DerefBefore,
1323                                       int Offset, bool DerefAfter) {
1324   return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
1325                            DerefBefore, Offset, DerefAfter);
1326 }
1327 
1328 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1329                                         DIBuilder &Builder, int Offset) {
1330   DebugLoc Loc = DVI->getDebugLoc();
1331   auto *DIVar = DVI->getVariable();
1332   auto *DIExpr = DVI->getExpression();
1333   assert(DIVar && "Missing variable");
1334 
1335   // This is an alloca-based llvm.dbg.value. The first thing it should do with
1336   // the alloca pointer is dereference it. Otherwise we don't know how to handle
1337   // it and give up.
1338   if (!DIExpr || DIExpr->getNumElements() < 1 ||
1339       DIExpr->getElement(0) != dwarf::DW_OP_deref)
1340     return;
1341 
1342   // Insert the offset immediately after the first deref.
1343   // We could just change the offset argument of dbg.value, but it's unsigned...
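  // Concretely, the rebuilt expression keeps the leading DW_OP_deref, then
  // applies the byte offset, and then re-appends the remainder of the original
  // expression.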
1344   if (Offset) {
1345     SmallVector<uint64_t, 4> Ops;
1346     Ops.push_back(dwarf::DW_OP_deref);
1347     DIExpression::appendOffset(Ops, Offset);
1348     Ops.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
1349     DIExpr = Builder.createExpression(Ops);
1350   }
1351 
1352   Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1353   DVI->eraseFromParent();
1354 }
1355 
1356 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1357                                     DIBuilder &Builder, int Offset) {
1358   if (auto *L = LocalAsMetadata::getIfExists(AI))
1359     if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1360       for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
1361         Use &U = *UI++;
1362         if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1363           replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1364       }
1365 }
1366 
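/// Try to keep the debug users of 'I' meaningful when 'I' is about to be
/// erased: point each dbg.value/dbg.addr/dbg.declare that refers to 'I' at one
/// of I's operands instead, folding the effect of the dying instruction (a
/// cast, a constant-offset GEP, an add of a constant, or a load) into the
/// intrinsic's DIExpression.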
1367 void llvm::salvageDebugInfo(Instruction &I) {
1368   SmallVector<DbgInfoIntrinsic *, 1> DbgUsers;
1369   findDbgUsers(DbgUsers, &I);
1370   if (DbgUsers.empty())
1371     return;
1372 
1373   auto &M = *I.getModule();
1374 
1375   auto wrapMD = [&](Value *V) {
1376     return MetadataAsValue::get(I.getContext(), ValueAsMetadata::get(V));
1377   };
1378 
1379   auto applyOffset = [&](DbgInfoIntrinsic *DII, uint64_t Offset) {
1380     auto *DIExpr = DII->getExpression();
1381     DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
1382                                    DIExpression::NoDeref,
1383                                    DIExpression::WithStackValue);
1384     DII->setOperand(0, wrapMD(I.getOperand(0)));
1385     DII->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
1386     DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1387   };
1388 
1389   if (isa<BitCastInst>(&I) || isa<IntToPtrInst>(&I)) {
1390     // Bitcasts and int-to-ptr casts are entirely irrelevant for debug info.
1391     // Rewrite dbg.value, dbg.addr, and dbg.declare to use the cast's source.
1392     for (auto *DII : DbgUsers) {
1393       DII->setOperand(0, wrapMD(I.getOperand(0)));
1394       DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1395     }
1396   } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1397     unsigned BitWidth =
1398         M.getDataLayout().getPointerSizeInBits(GEP->getPointerAddressSpace());
1399     // Rewrite a constant GEP into a DIExpression.  Since we are performing
1400     // arithmetic to compute the variable's *value* in the DIExpression, we
1401     // need to mark the expression with a DW_OP_stack_value.
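    // As a hypothetical illustration (the names are made up), salvaging
    //   %p = getelementptr inbounds i8, i8* %base, i64 4
    //   call void @llvm.dbg.value(metadata i8* %p, ...)
    // points the dbg.value at %base and folds "+4" plus a DW_OP_stack_value
    // into its DIExpression.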
1402     APInt Offset(BitWidth, 0);
1403     if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset))
1404       for (auto *DII : DbgUsers)
1405         applyOffset(DII, Offset.getSExtValue());
1406   } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1407     if (BI->getOpcode() == Instruction::Add)
1408       if (auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1)))
1409         if (ConstInt->getBitWidth() <= 64)
1410           for (auto *DII : DbgUsers)
1411             applyOffset(DII, ConstInt->getSExtValue());
1412   } else if (isa<LoadInst>(&I)) {
1413     MetadataAsValue *AddrMD = wrapMD(I.getOperand(0));
1414     for (auto *DII : DbgUsers) {
1415       // Rewrite the load into DW_OP_deref.
1416       auto *DIExpr = DII->getExpression();
1417       DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref);
1418       DII->setOperand(0, AddrMD);
1419       DII->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
1420       DEBUG(dbgs() << "SALVAGE:  " << *DII << '\n');
1421     }
1422   }
1423 }
1424 
1425 unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
1426   unsigned NumDeadInst = 0;
1427   // Delete the instructions backwards; this reduces the number of def-use
1428   // and use-def chain updates that have to be made.
1429   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
1430   while (EndInst != &BB->front()) {
1431     // Delete the next to last instruction.
1432     Instruction *Inst = &*--EndInst->getIterator();
1433     if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
1434       Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
1435     if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
1436       EndInst = Inst;
1437       continue;
1438     }
1439     if (!isa<DbgInfoIntrinsic>(Inst))
1440       ++NumDeadInst;
1441     Inst->eraseFromParent();
1442   }
1443   return NumDeadInst;
1444 }
1445 
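/// Replace 'I' and everything after it in its basic block with an
/// 'unreachable' terminator (optionally preceded by a call to @llvm.trap) and
/// remove the block from its successors' PHI nodes. Returns the number of
/// instructions removed.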
1446 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
1447                                    bool PreserveLCSSA) {
1448   BasicBlock *BB = I->getParent();
1449   // Loop over all of the successors, removing BB's entry from any PHI
1450   // nodes.
1451   for (BasicBlock *Successor : successors(BB))
1452     Successor->removePredecessor(BB, PreserveLCSSA);
1453 
1454   // Insert a call to llvm.trap right before this.  This turns the undefined
1455   // behavior into a hard fail instead of falling through into random code.
1456   if (UseLLVMTrap) {
1457     Function *TrapFn =
1458       Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
1459     CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
1460     CallTrap->setDebugLoc(I->getDebugLoc());
1461   }
1462   new UnreachableInst(I->getContext(), I);
1463 
1464   // All instructions after this are dead.
1465   unsigned NumInstrsRemoved = 0;
1466   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
1467   while (BBI != BBE) {
1468     if (!BBI->use_empty())
1469       BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
1470     BB->getInstList().erase(BBI++);
1471     ++NumInstrsRemoved;
1472   }
1473   return NumInstrsRemoved;
1474 }
1475 
1476 /// changeToCall - Convert the specified invoke into a normal call.
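/// For example,
///   invoke void @f() to label %normal unwind label %lpad
/// becomes
///   call void @f()
///   br label %normal
/// and the block is removed from %lpad's predecessors.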
1477 static void changeToCall(InvokeInst *II) {
1478   SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
1479   SmallVector<OperandBundleDef, 1> OpBundles;
1480   II->getOperandBundlesAsDefs(OpBundles);
1481   CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
1482                                        "", II);
1483   NewCall->takeName(II);
1484   NewCall->setCallingConv(II->getCallingConv());
1485   NewCall->setAttributes(II->getAttributes());
1486   NewCall->setDebugLoc(II->getDebugLoc());
1487   II->replaceAllUsesWith(NewCall);
1488 
1489   // Follow the call by a branch to the normal destination.
1490   BranchInst::Create(II->getNormalDest(), II);
1491 
1492   // Update PHI nodes in the unwind destination
1493   II->getUnwindDest()->removePredecessor(II->getParent());
1494   II->eraseFromParent();
1495 }
1496 
1497 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
1498                                                    BasicBlock *UnwindEdge) {
1499   BasicBlock *BB = CI->getParent();
1500 
1501   // Convert this function call into an invoke instruction.  First, split the
1502   // basic block.
1503   BasicBlock *Split =
1504       BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");
1505 
1506   // Delete the unconditional branch inserted by splitBasicBlock
1507   BB->getInstList().pop_back();
1508 
1509   // Create the new invoke instruction.
1510   SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
1511   SmallVector<OperandBundleDef, 1> OpBundles;
1512 
1513   CI->getOperandBundlesAsDefs(OpBundles);
1514 
1515   // Note: we're round tripping operand bundles through memory here, and that
1516   // can potentially be avoided with a cleverer API design that we do not have
1517   // as of this time.
1518 
1519   InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
1520                                       InvokeArgs, OpBundles, CI->getName(), BB);
1521   II->setDebugLoc(CI->getDebugLoc());
1522   II->setCallingConv(CI->getCallingConv());
1523   II->setAttributes(CI->getAttributes());
1524 
1525   // Make sure that anything using the call now uses the invoke!  This also
1526   // updates the CallGraph if present, because it uses a WeakTrackingVH.
1527   CI->replaceAllUsesWith(II);
1528 
1529   // Delete the original call
1530   Split->getInstList().pop_front();
1531   return Split;
1532 }
1533 
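/// Walk the CFG from the entry block, simplifying obviously unreachable code
/// as we go (see the comments below) and recording every block that remains
/// reachable in 'Reachable'. Returns true if any instruction was changed.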
1534 static bool markAliveBlocks(Function &F,
1535                             SmallPtrSetImpl<BasicBlock*> &Reachable) {
1536   SmallVector<BasicBlock*, 128> Worklist;
1537   BasicBlock *BB = &F.front();
1538   Worklist.push_back(BB);
1539   Reachable.insert(BB);
1540   bool Changed = false;
1541   do {
1542     BB = Worklist.pop_back_val();
1543 
1544     // Do a quick scan of the basic block, turning any obviously unreachable
1545     // instructions into LLVM unreachable insts.  The instruction combining pass
1546     // canonicalizes unreachable insts into stores to null or undef.
1547     for (Instruction &I : *BB) {
1548       // Assumptions that are known to be false are equivalent to unreachable.
1549       // Also, if the condition is undefined, then we make the choice most
1550       // beneficial to the optimizer, and choose that to also be unreachable.
1551       if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
1552         if (II->getIntrinsicID() == Intrinsic::assume) {
1553           if (match(II->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
1554             // Don't insert a call to llvm.trap right before the unreachable.
1555             changeToUnreachable(II, false);
1556             Changed = true;
1557             break;
1558           }
1559         }
1560 
1561         if (II->getIntrinsicID() == Intrinsic::experimental_guard) {
1562           // A call to the guard intrinsic bails out of the current compilation
1563           // unit if the predicate passed to it is false.  If the predicate is a
1564           // constant false, then we know the guard will bail out of the current
1565           // compile unconditionally, so all code following it is dead.
1566           //
1567           // Note: unlike in llvm.assume, it is not "obviously profitable" for
1568           // guards to treat `undef` as `false` since a guard on `undef` can
1569           // still be useful for widening.
1570           if (match(II->getArgOperand(0), m_Zero()))
1571             if (!isa<UnreachableInst>(II->getNextNode())) {
1572               changeToUnreachable(II->getNextNode(), /*UseLLVMTrap=*/ false);
1573               Changed = true;
1574               break;
1575             }
1576         }
1577       }
1578 
1579       if (auto *CI = dyn_cast<CallInst>(&I)) {
1580         Value *Callee = CI->getCalledValue();
1581         if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
1582           changeToUnreachable(CI, /*UseLLVMTrap=*/false);
1583           Changed = true;
1584           break;
1585         }
1586         if (CI->doesNotReturn()) {
1587           // If we found a call to a no-return function, insert an unreachable
1588           // instruction after it.  Make sure there isn't *already* one there
1589           // though.
1590           if (!isa<UnreachableInst>(CI->getNextNode())) {
1591             // Don't insert a call to llvm.trap right before the unreachable.
1592             changeToUnreachable(CI->getNextNode(), false);
1593             Changed = true;
1594           }
1595           break;
1596         }
1597       }
1598 
1599       // A store to undef or to null is undefined behavior; passes that cannot
1600       // modify the CFG emit such stores to signal that the surrounding code
1601       // should be turned into 'unreachable' here.
1602       if (auto *SI = dyn_cast<StoreInst>(&I)) {
1603         // Don't touch volatile stores.
1604         if (SI->isVolatile()) continue;
1605 
1606         Value *Ptr = SI->getOperand(1);
1607 
1608         if (isa<UndefValue>(Ptr) ||
1609             (isa<ConstantPointerNull>(Ptr) &&
1610              SI->getPointerAddressSpace() == 0)) {
1611           changeToUnreachable(SI, true);
1612           Changed = true;
1613           break;
1614         }
1615       }
1616     }
1617 
1618     TerminatorInst *Terminator = BB->getTerminator();
1619     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
1620       // Turn invokes that call 'nounwind' functions into ordinary calls.
1621       Value *Callee = II->getCalledValue();
1622       if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
1623         changeToUnreachable(II, true);
1624         Changed = true;
1625       } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
1626         if (II->use_empty() && II->onlyReadsMemory()) {
1627           // Just branch to the normal destination.
1628           BranchInst::Create(II->getNormalDest(), II);
1629           II->getUnwindDest()->removePredecessor(II->getParent());
1630           II->eraseFromParent();
1631         } else
1632           changeToCall(II);
1633         Changed = true;
1634       }
1635     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
1636       // Remove catchpads which cannot be reached.
1637       struct CatchPadDenseMapInfo {
1638         static CatchPadInst *getEmptyKey() {
1639           return DenseMapInfo<CatchPadInst *>::getEmptyKey();
1640         }
1641 
1642         static CatchPadInst *getTombstoneKey() {
1643           return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
1644         }
1645 
1646         static unsigned getHashValue(CatchPadInst *CatchPad) {
1647           return static_cast<unsigned>(hash_combine_range(
1648               CatchPad->value_op_begin(), CatchPad->value_op_end()));
1649         }
1650 
1651         static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
1652           if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
1653               RHS == getEmptyKey() || RHS == getTombstoneKey())
1654             return LHS == RHS;
1655           return LHS->isIdenticalTo(RHS);
1656         }
1657       };
1658 
1659       // Set of unique CatchPads.
1660       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
1661                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
1662           HandlerSet;
1663       detail::DenseSetEmpty Empty;
1664       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
1665                                              E = CatchSwitch->handler_end();
1666            I != E; ++I) {
1667         BasicBlock *HandlerBB = *I;
1668         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
1669         if (!HandlerSet.insert({CatchPad, Empty}).second) {
1670           CatchSwitch->removeHandler(I);
1671           --I;
1672           --E;
1673           Changed = true;
1674         }
1675       }
1676     }
1677 
1678     Changed |= ConstantFoldTerminator(BB, true);
1679     for (BasicBlock *Successor : successors(BB))
1680       if (Reachable.insert(Successor).second)
1681         Worklist.push_back(Successor);
1682   } while (!Worklist.empty());
1683   return Changed;
1684 }
1685 
1686 void llvm::removeUnwindEdge(BasicBlock *BB) {
1687   TerminatorInst *TI = BB->getTerminator();
1688 
1689   if (auto *II = dyn_cast<InvokeInst>(TI)) {
1690     changeToCall(II);
1691     return;
1692   }
1693 
1694   TerminatorInst *NewTI;
1695   BasicBlock *UnwindDest;
1696 
1697   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
1698     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
1699     UnwindDest = CRI->getUnwindDest();
1700   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
1701     auto *NewCatchSwitch = CatchSwitchInst::Create(
1702         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
1703         CatchSwitch->getName(), CatchSwitch);
1704     for (BasicBlock *PadBB : CatchSwitch->handlers())
1705       NewCatchSwitch->addHandler(PadBB);
1706 
1707     NewTI = NewCatchSwitch;
1708     UnwindDest = CatchSwitch->getUnwindDest();
1709   } else {
1710     llvm_unreachable("Could not find unwind successor");
1711   }
1712 
1713   NewTI->takeName(TI);
1714   NewTI->setDebugLoc(TI->getDebugLoc());
1715   UnwindDest->removePredecessor(BB);
1716   TI->replaceAllUsesWith(NewTI);
1717   TI->eraseFromParent();
1718 }
1719 
1720 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
1721 /// if they are in a dead cycle.  Return true if a change was made, false
1722 /// otherwise. If `LVI` is passed, this function preserves LazyValueInfo
1723 /// after modifying the CFG.
1724 bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI) {
1725   SmallPtrSet<BasicBlock*, 16> Reachable;
1726   bool Changed = markAliveBlocks(F, Reachable);
1727 
1728   // If all of the blocks are reachable, there is nothing to remove.
1729   if (Reachable.size() == F.size())
1730     return Changed;
1731 
1732   assert(Reachable.size() < F.size());
1733   NumRemoved += F.size()-Reachable.size();
1734 
1735   // Loop over all of the basic blocks that are not reachable, dropping all of
1736   // their internal references...
1737   for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
1738     if (Reachable.count(&*BB))
1739       continue;
1740 
1741     for (BasicBlock *Successor : successors(&*BB))
1742       if (Reachable.count(Successor))
1743         Successor->removePredecessor(&*BB);
1744     if (LVI)
1745       LVI->eraseBlock(&*BB);
1746     BB->dropAllReferences();
1747   }
1748 
1749   for (Function::iterator I = ++F.begin(); I != F.end();)
1750     if (!Reachable.count(&*I))
1751       I = F.getBasicBlockList().erase(I);
1752     else
1753       ++I;
1754 
1755   return true;
1756 }
1757 
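/// Merge the metadata of 'J' into 'K' when the two instructions are being
/// combined and only K is kept. The kinds listed in 'KnownIDs' are merged
/// conservatively (e.g. !tbaa falls back to the most generic common TBAA node,
/// and !nonnull survives only if both instructions carry it); all other
/// non-debug metadata is dropped from K.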
1758 void llvm::combineMetadata(Instruction *K, const Instruction *J,
1759                            ArrayRef<unsigned> KnownIDs) {
1760   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
1761   K->dropUnknownNonDebugMetadata(KnownIDs);
1762   K->getAllMetadataOtherThanDebugLoc(Metadata);
1763   for (const auto &MD : Metadata) {
1764     unsigned Kind = MD.first;
1765     MDNode *JMD = J->getMetadata(Kind);
1766     MDNode *KMD = MD.second;
1767 
1768     switch (Kind) {
1769       default:
1770         K->setMetadata(Kind, nullptr); // Remove unknown metadata
1771         break;
1772       case LLVMContext::MD_dbg:
1773         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
1774       case LLVMContext::MD_tbaa:
1775         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
1776         break;
1777       case LLVMContext::MD_alias_scope:
1778         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
1779         break;
1780       case LLVMContext::MD_noalias:
1781       case LLVMContext::MD_mem_parallel_loop_access:
1782         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
1783         break;
1784       case LLVMContext::MD_range:
1785         K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
1786         break;
1787       case LLVMContext::MD_fpmath:
1788         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
1789         break;
1790       case LLVMContext::MD_invariant_load:
1791         // Only set the !invariant.load if it is present in both instructions.
1792         K->setMetadata(Kind, JMD);
1793         break;
1794       case LLVMContext::MD_nonnull:
1795         // Only set the !nonnull if it is present in both instructions.
1796         K->setMetadata(Kind, JMD);
1797         break;
1798       case LLVMContext::MD_invariant_group:
1799         // Preserve !invariant.group in K.
1800         break;
1801       case LLVMContext::MD_align:
1802         K->setMetadata(Kind,
1803           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
1804         break;
1805       case LLVMContext::MD_dereferenceable:
1806       case LLVMContext::MD_dereferenceable_or_null:
1807         K->setMetadata(Kind,
1808           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
1809         break;
1810     }
1811   }
1812   // Set !invariant.group from J if J has it. If both instructions have it
1813   // then we will just pick it from J - even when they are different.
1814   // Also make sure that K is a load or a store - e.g. combining a bitcast with
1815   // a load could produce a bitcast with invariant.group metadata, which is invalid.
1816   // FIXME: we should try to preserve both invariant.group attachments if they
1817   // differ, but right now an instruction can only carry one invariant.group.
1818   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
1819     if (isa<LoadInst>(K) || isa<StoreInst>(K))
1820       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
1821 }
1822 
1823 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J) {
1824   unsigned KnownIDs[] = {
1825       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
1826       LLVMContext::MD_noalias,         LLVMContext::MD_range,
1827       LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
1828       LLVMContext::MD_invariant_group, LLVMContext::MD_align,
1829       LLVMContext::MD_dereferenceable,
1830       LLVMContext::MD_dereferenceable_or_null};
1831   combineMetadata(K, J, KnownIDs);
1832 }
1833 
1834 template <typename RootType, typename DominatesFn>
1835 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
1836                                          const RootType &Root,
1837                                          const DominatesFn &Dominates) {
1838   assert(From->getType() == To->getType());
1839 
1840   unsigned Count = 0;
1841   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
1842        UI != UE;) {
1843     Use &U = *UI++;
1844     if (!Dominates(Root, U))
1845       continue;
1846     U.set(To);
1847     DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
1848                  << *To << " in " << *U << "\n");
1849     ++Count;
1850   }
1851   return Count;
1852 }
1853 
1854 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
1855    assert(From->getType() == To->getType());
1856    auto *BB = From->getParent();
1857    unsigned Count = 0;
1858 
1859   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
1860        UI != UE;) {
1861     Use &U = *UI++;
1862     auto *I = cast<Instruction>(U.getUser());
1863     if (I->getParent() == BB)
1864       continue;
1865     U.set(To);
1866     ++Count;
1867   }
1868   return Count;
1869 }
1870 
1871 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
1872                                         DominatorTree &DT,
1873                                         const BasicBlockEdge &Root) {
1874   auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
1875     return DT.dominates(Root, U);
1876   };
1877   return ::replaceDominatedUsesWith(From, To, Root, Dominates);
1878 }
1879 
1880 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
1881                                         DominatorTree &DT,
1882                                         const BasicBlock *BB) {
1883   auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) {
1884     auto *I = cast<Instruction>(U.getUser())->getParent();
1885     return DT.properlyDominates(BB, I);
1886   };
1887   return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates);
1888 }
1889 
1890 bool llvm::callsGCLeafFunction(ImmutableCallSite CS,
1891                                const TargetLibraryInfo &TLI) {
1892   // Check if the function is specifically marked as a gc leaf function.
1893   if (CS.hasFnAttr("gc-leaf-function"))
1894     return true;
1895   if (const Function *F = CS.getCalledFunction()) {
1896     if (F->hasFnAttribute("gc-leaf-function"))
1897       return true;
1898 
1899     if (auto IID = F->getIntrinsicID())
1900       // Most LLVM intrinsics do not take safepoints.
1901       return IID != Intrinsic::experimental_gc_statepoint &&
1902              IID != Intrinsic::experimental_deoptimize;
1903   }
1904 
1905   // Lib calls can be materialized by some passes, and won't be
1906   // marked as 'gc-leaf-function.' All available Libcalls are
1907   // GC-leaf.
1908   LibFunc LF;
1909   if (TLI.getLibFunc(CS, LF)) {
1910     return TLI.has(LF);
1911   }
1912 
1913   return false;
1914 }
1915 
1916 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
1917                                LoadInst &NewLI) {
1918   auto *NewTy = NewLI.getType();
1919 
1920   // This only directly applies if the new type is also a pointer.
1921   if (NewTy->isPointerTy()) {
1922     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
1923     return;
1924   }
1925 
1926   // The only other translation we can do is to integral loads with !range
1927   // metadata.
1928   if (!NewTy->isIntegerTy())
1929     return;
1930 
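  // Translate !nonnull into a !range that excludes the null value. Assuming
  // the usual data layout where the null pointer maps to 0, an i64 load ends
  // up with !range !{i64 1, i64 0}: the wrapping range that covers every value
  // except 0.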
1931   MDBuilder MDB(NewLI.getContext());
1932   const Value *Ptr = OldLI.getPointerOperand();
1933   auto *ITy = cast<IntegerType>(NewTy);
1934   auto *NullInt = ConstantExpr::getPtrToInt(
1935       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
1936   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
1937   NewLI.setMetadata(LLVMContext::MD_range,
1938                     MDB.createRange(NonNullInt, NullInt));
1939 }
1940 
1941 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
1942                              MDNode *N, LoadInst &NewLI) {
1943   auto *NewTy = NewLI.getType();
1944 
1945   // Give up unless the load has been converted to a pointer type; that is the
1946   // one case where there is a single, very valuable mapping we can do reliably.
1947   // FIXME: It would be nice to propagate this in more ways, but the type
1948   // conversions make it hard.
1949   if (!NewTy->isPointerTy())
1950     return;
1951 
1952   unsigned BitWidth = DL.getTypeSizeInBits(NewTy);
1953   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
1954     MDNode *NN = MDNode::get(OldLI.getContext(), None);
1955     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
1956   }
1957 }
1958 
1959 namespace {
1960 
1961 /// A potential constituent of a bitreverse or bswap expression. See
1962 /// collectBitParts for a fuller explanation.
1963 struct BitPart {
1964   BitPart(Value *P, unsigned BW) : Provider(P) {
1965     Provenance.resize(BW);
1966   }
1967 
1968   /// The Value that this is a bitreverse/bswap of.
1969   Value *Provider;
1970 
1971   /// The "provenance" of each bit. Provenance[A] = B means that bit A of the
1972   /// result of this expression comes from bit B of Provider.
1973   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
1974 
1975   enum { Unset = -1 };
1976 };
1977 
1978 } // end anonymous namespace
1979 
1980 /// Analyze the specified subexpression and see if it is capable of providing
1981 /// pieces of a bswap or bitreverse. The subexpression provides a potential
1982 /// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
1983 /// the output of the expression came from a corresponding bit in some other
1984 /// value. This function is recursive, and the end result is a mapping of
1985 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
1986 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
1987 ///
1988 /// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
1989 /// that the expression deposits the low byte of %X into the high byte of the
1990 /// result and that all other bits are zero. This expression is accepted and a
1991 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
1992 /// [0-7].
1993 ///
1994 /// To avoid revisiting values, the BitPart results are memoized into the
1995 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
1996 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
1997 /// store BitParts objects, not pointers. As we need the concept of a nullptr
1998 /// BitParts (the Value has been analyzed and the analysis failed), we use an
1999 /// Optional type instead to provide the same functionality.
2000 ///
2001 /// Because we pass around references into \c BPS, we must use a container that
2002 /// does not invalidate internal references (std::map instead of DenseMap).
2003 static const Optional<BitPart> &
2004 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2005                 std::map<Value *, Optional<BitPart>> &BPS) {
2006   auto I = BPS.find(V);
2007   if (I != BPS.end())
2008     return I->second;
2009 
2010   auto &Result = BPS[V] = None;
2011   auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2012 
2013   if (Instruction *I = dyn_cast<Instruction>(V)) {
2014     // If this is an or instruction, it may be an inner node of the bswap.
2015     if (I->getOpcode() == Instruction::Or) {
2016       auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
2017                                 MatchBitReversals, BPS);
2018       auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
2019                                 MatchBitReversals, BPS);
2020       if (!A || !B)
2021         return Result;
2022 
2023       // Try and merge the two together.
2024       if (!A->Provider || A->Provider != B->Provider)
2025         return Result;
2026 
2027       Result = BitPart(A->Provider, BitWidth);
2028       for (unsigned i = 0; i < A->Provenance.size(); ++i) {
2029         if (A->Provenance[i] != BitPart::Unset &&
2030             B->Provenance[i] != BitPart::Unset &&
2031             A->Provenance[i] != B->Provenance[i])
2032           return Result = None;
2033 
2034         if (A->Provenance[i] == BitPart::Unset)
2035           Result->Provenance[i] = B->Provenance[i];
2036         else
2037           Result->Provenance[i] = A->Provenance[i];
2038       }
2039 
2040       return Result;
2041     }
2042 
2043     // If this is a logical shift by a constant, recurse then shift the result.
2044     if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
2045       unsigned BitShift =
2046           cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
2047       // Ensure the shift amount is defined.
2048       if (BitShift > BitWidth)
2049         return Result;
2050 
2051       auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2052                                   MatchBitReversals, BPS);
2053       if (!Res)
2054         return Result;
2055       Result = Res;
2056 
2057       // Perform the "shift" on BitProvenance.
2058       auto &P = Result->Provenance;
2059       if (I->getOpcode() == Instruction::Shl) {
2060         P.erase(std::prev(P.end(), BitShift), P.end());
2061         P.insert(P.begin(), BitShift, BitPart::Unset);
2062       } else {
2063         P.erase(P.begin(), std::next(P.begin(), BitShift));
2064         P.insert(P.end(), BitShift, BitPart::Unset);
2065       }
2066 
2067       return Result;
2068     }
2069 
2070     // If this is a logical 'and' with a mask that clears bits, recurse then
2071     // unset the appropriate bits.
2072     if (I->getOpcode() == Instruction::And &&
2073         isa<ConstantInt>(I->getOperand(1))) {
2074       APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
2075       const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
2076 
2077       // Check that the mask allows a multiple of 8 bits for a bswap, for an
2078       // early exit.
2079       unsigned NumMaskedBits = AndMask.countPopulation();
2080       if (!MatchBitReversals && NumMaskedBits % 8 != 0)
2081         return Result;
2082 
2083       auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2084                                   MatchBitReversals, BPS);
2085       if (!Res)
2086         return Result;
2087       Result = Res;
2088 
2089       for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
2090         // If the AndMask is zero for this bit, clear the bit.
2091         if ((AndMask & Bit) == 0)
2092           Result->Provenance[i] = BitPart::Unset;
2093       return Result;
2094     }
2095 
2096     // If this is a zext instruction zero extend the result.
2097     if (I->getOpcode() == Instruction::ZExt) {
2098       auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
2099                                   MatchBitReversals, BPS);
2100       if (!Res)
2101         return Result;
2102 
2103       Result = BitPart(Res->Provider, BitWidth);
2104       auto NarrowBitWidth =
2105           cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
2106       for (unsigned i = 0; i < NarrowBitWidth; ++i)
2107         Result->Provenance[i] = Res->Provenance[i];
2108       for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
2109         Result->Provenance[i] = BitPart::Unset;
2110       return Result;
2111     }
2112   }
2113 
2114   // Okay, we got to something that isn't a shift, 'or' or 'and'.  This must be
2115   // the input value to the bswap/bitreverse.
2116   Result = BitPart(V, BitWidth);
2117   for (unsigned i = 0; i < BitWidth; ++i)
2118     Result->Provenance[i] = i;
2119   return Result;
2120 }
2121 
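/// Return true if moving bit 'From' of the provider to bit 'To' of the result
/// is consistent with a byte swap of a BitWidth-bit value. For example, with
/// BitWidth == 32, From == 0 -> To == 24 is fine (byte 0 moves to byte 3),
/// whereas From == 0 -> To == 16 is not.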
2122 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
2123                                           unsigned BitWidth) {
2124   if (From % 8 != To % 8)
2125     return false;
2126   // Convert from bit indices to byte indices and check for a byte reversal.
2127   From >>= 3;
2128   To >>= 3;
2129   BitWidth >>= 3;
2130   return From == BitWidth - To - 1;
2131 }
2132 
2133 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
2134                                                unsigned BitWidth) {
2135   return From == BitWidth - To - 1;
2136 }
2137 
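/// A minimal sketch of the kind of IR this recognizes (assuming MatchBSwaps is
/// set):
///   %hi  = shl i16 %x, 8
///   %lo  = lshr i16 %x, 8
///   %res = or i16 %hi, %lo
/// For this pattern a call to @llvm.bswap.i16(i16 %x) is inserted before the
/// 'or' and reported through InsertedInsts.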
2138 bool llvm::recognizeBSwapOrBitReverseIdiom(
2139     Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
2140     SmallVectorImpl<Instruction *> &InsertedInsts) {
2141   if (Operator::getOpcode(I) != Instruction::Or)
2142     return false;
2143   if (!MatchBSwaps && !MatchBitReversals)
2144     return false;
2145   IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
2146   if (!ITy || ITy->getBitWidth() > 128)
2147     return false;   // Can't do vectors or integers > 128 bits.
2148   unsigned BW = ITy->getBitWidth();
2149 
2150   unsigned DemandedBW = BW;
2151   IntegerType *DemandedTy = ITy;
2152   if (I->hasOneUse()) {
2153     if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
2154       DemandedTy = cast<IntegerType>(Trunc->getType());
2155       DemandedBW = DemandedTy->getBitWidth();
2156     }
2157   }
2158 
2159   // Try to find all the pieces corresponding to the bswap.
2160   std::map<Value *, Optional<BitPart>> BPS;
2161   auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
2162   if (!Res)
2163     return false;
2164   auto &BitProvenance = Res->Provenance;
2165 
2166   // Now, is the bit permutation correct for a bswap or a bitreverse? We can
2167   // only byteswap values with an even number of bytes.
2168   bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
2169   for (unsigned i = 0; i < DemandedBW; ++i) {
2170     OKForBSwap &=
2171         bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
2172     OKForBitReverse &=
2173         bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
2174   }
2175 
2176   Intrinsic::ID Intrin;
2177   if (OKForBSwap && MatchBSwaps)
2178     Intrin = Intrinsic::bswap;
2179   else if (OKForBitReverse && MatchBitReversals)
2180     Intrin = Intrinsic::bitreverse;
2181   else
2182     return false;
2183 
2184   if (ITy != DemandedTy) {
2185     Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
2186     Value *Provider = Res->Provider;
2187     IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
2188     // We may need to truncate the provider.
2189     if (DemandedTy != ProviderTy) {
2190       auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
2191                                      "trunc", I);
2192       InsertedInsts.push_back(Trunc);
2193       Provider = Trunc;
2194     }
2195     auto *CI = CallInst::Create(F, Provider, "rev", I);
2196     InsertedInsts.push_back(CI);
2197     auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
2198     InsertedInsts.push_back(ExtInst);
2199     return true;
2200   }
2201 
2202   Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
2203   InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
2204   return true;
2205 }
2206 
2207 // CodeGen has special handling for some string functions that may replace
2208 // them with target-specific intrinsics.  Since that'd skip our interceptors
2209 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
2210 // we mark affected calls as NoBuiltin, which will disable optimization
2211 // in CodeGen.
2212 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
2213     CallInst *CI, const TargetLibraryInfo *TLI) {
2214   Function *F = CI->getCalledFunction();
2215   LibFunc Func;
2216   if (F && !F->hasLocalLinkage() && F->hasName() &&
2217       TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
2218       !F->doesNotAccessMemory())
2219     CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
2220 }
2221 
2222 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
2223   // We can't have a PHI with a metadata type.
2224   if (I->getOperand(OpIdx)->getType()->isMetadataTy())
2225     return false;
2226 
2227   // Early exit.
2228   if (!isa<Constant>(I->getOperand(OpIdx)))
2229     return true;
2230 
2231   switch (I->getOpcode()) {
2232   default:
2233     return true;
2234   case Instruction::Call:
2235   case Instruction::Invoke:
2236     // Can't handle inline asm. Skip it.
2237     if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue()))
2238       return false;
2239     // Many arithmetic intrinsics have no issue taking a
2240     // variable; however, it's hard to distinguish these from
2241     // special intrinsics such as @llvm.frameaddress that require a constant.
2242     if (isa<IntrinsicInst>(I))
2243       return false;
2244 
2245     // Constant bundle operands may need to retain their constant-ness for
2246     // correctness.
2247     if (ImmutableCallSite(I).isBundleOperand(OpIdx))
2248       return false;
2249     return true;
2250   case Instruction::ShuffleVector:
2251     // Shufflevector masks are constant.
2252     return OpIdx != 2;
2253   case Instruction::Switch:
2254   case Instruction::ExtractValue:
2255     // All operands apart from the first are constant.
2256     return OpIdx == 0;
2257   case Instruction::InsertValue:
2258     // All operands apart from the first and the second are constant.
2259     return OpIdx < 2;
2260   case Instruction::Alloca:
2261     // Static allocas (constant size in the entry block) are handled by
2262     // prologue/epilogue insertion so they're free anyway. We definitely don't
2263     // want to make them non-constant.
2264     return !cast<AllocaInst>(I)->isStaticAlloca();
2265   case Instruction::GetElementPtr:
2266     if (OpIdx == 0)
2267       return true;
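    // Indices into struct types must stay constant: walk the indexed types up
    // to OpIdx and reject the replacement if any of them is a struct.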
2268     gep_type_iterator It = gep_type_begin(I);
2269     for (auto E = std::next(It, OpIdx); It != E; ++It)
2270       if (It.isStruct())
2271         return false;
2272     return true;
2273   }
2274 }
2275