1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This family of functions performs various local transformations to the
10 // program.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/DomTreeUpdater.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/InstructionSimplify.h"
32 #include "llvm/Analysis/LazyValueInfo.h"
33 #include "llvm/Analysis/MemoryBuiltins.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/Analysis/VectorUtils.h"
38 #include "llvm/BinaryFormat/Dwarf.h"
39 #include "llvm/IR/Argument.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/IR/CFG.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/ConstantRange.h"
45 #include "llvm/IR/Constants.h"
46 #include "llvm/IR/DIBuilder.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfoMetadata.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/DerivedTypes.h"
51 #include "llvm/IR/Dominators.h"
52 #include "llvm/IR/Function.h"
53 #include "llvm/IR/GetElementPtrTypeIterator.h"
54 #include "llvm/IR/GlobalObject.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InstrTypes.h"
57 #include "llvm/IR/Instruction.h"
58 #include "llvm/IR/Instructions.h"
59 #include "llvm/IR/IntrinsicInst.h"
60 #include "llvm/IR/Intrinsics.h"
61 #include "llvm/IR/LLVMContext.h"
62 #include "llvm/IR/MDBuilder.h"
63 #include "llvm/IR/Metadata.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/Operator.h"
66 #include "llvm/IR/PatternMatch.h"
67 #include "llvm/IR/PseudoProbe.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/KnownBits.h"
77 #include "llvm/Support/raw_ostream.h"
78 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
79 #include "llvm/Transforms/Utils/ValueMapper.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <climits>
83 #include <cstdint>
84 #include <iterator>
85 #include <map>
86 #include <utility>
87 
88 using namespace llvm;
89 using namespace llvm::PatternMatch;
90 
91 #define DEBUG_TYPE "local"
92 
93 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94 STATISTIC(NumPHICSEs, "Number of PHIs that got CSE'd");
95 
96 static cl::opt<bool> PHICSEDebugHash(
97     "phicse-debug-hash",
98 #ifdef EXPENSIVE_CHECKS
99     cl::init(true),
100 #else
101     cl::init(false),
102 #endif
103     cl::Hidden,
104     cl::desc("Perform extra assertion checking to verify that PHINode's hash "
105              "function is well-behaved w.r.t. its isEqual predicate"));
106 
107 static cl::opt<unsigned> PHICSENumPHISmallSize(
108     "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109     cl::desc(
110         "When the basic block contains no more than this number of PHI nodes, "
111         "perform a (faster!) exhaustive search instead of a set-driven one."));
112 
113 // Max recursion depth for collectBitParts used when detecting bswap and
114 // bitreverse idioms
115 static const unsigned BitPartRecursionMaxDepth = 64;
116 
117 //===----------------------------------------------------------------------===//
118 //  Local constant propagation.
119 //
120 
121 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
122 /// constant value, convert it into an unconditional branch to the constant
123 /// destination.  This is a nontrivial operation because the successors of this
124 /// basic block must have their PHI nodes updated.
125 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
126 /// conditions and indirectbr addresses this might make dead if
127 /// DeleteDeadConditions is true.
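/// As an illustrative (not exhaustive) example, a switch whose condition is a
/// known constant, e.g.
///   switch i32 1, label %default [ i32 1, label %bb1
///                                  i32 2, label %bb2 ]
/// is rewritten to
///   br label %bb1
/// with the PHI nodes in the no-longer-reached successors updated to drop
/// their entries for this block.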
128 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
129                                   const TargetLibraryInfo *TLI,
130                                   DomTreeUpdater *DTU) {
131   Instruction *T = BB->getTerminator();
132   IRBuilder<> Builder(T);
133 
134   // Branch - See if we are conditionally jumping on a constant.
135   if (auto *BI = dyn_cast<BranchInst>(T)) {
136     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
137 
138     BasicBlock *Dest1 = BI->getSuccessor(0);
139     BasicBlock *Dest2 = BI->getSuccessor(1);
140 
141     if (Dest2 == Dest1) {       // Conditional branch to same location?
142       // This branch matches something like this:
143       //     br bool %cond, label %Dest, label %Dest
144       // and changes it into:  br label %Dest
145 
146       // Let the basic block know that we are letting go of one copy of it.
147       assert(BI->getParent() && "Terminator not inserted in block!");
148       Dest1->removePredecessor(BI->getParent());
149 
150       // Replace the conditional branch with an unconditional one.
151       Builder.CreateBr(Dest1);
152       Value *Cond = BI->getCondition();
153       BI->eraseFromParent();
154       if (DeleteDeadConditions)
155         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
156       return true;
157     }
158 
159     if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
160       // Are we branching on constant?
161       // YES.  Change to unconditional branch...
162       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
163       BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
164 
165       // Let the basic block know that we are letting go of it.  Based on this,
166       // it will adjust its PHI nodes.
167       OldDest->removePredecessor(BB);
168 
169       // Replace the conditional branch with an unconditional one.
170       BranchInst *NewBI = Builder.CreateBr(Destination);
171 
172       // Transfer the metadata to the new branch instruction.
173       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
174                                 LLVMContext::MD_annotation});
175 
176       BI->eraseFromParent();
177       if (DTU)
178         DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
179       return true;
180     }
181 
182     return false;
183   }
184 
185   if (auto *SI = dyn_cast<SwitchInst>(T)) {
186     // If we are switching on a constant, we can convert the switch to an
187     // unconditional branch.
188     auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
189     BasicBlock *DefaultDest = SI->getDefaultDest();
190     BasicBlock *TheOnlyDest = DefaultDest;
191 
192     // If the default is unreachable, ignore it when searching for TheOnlyDest.
193     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
194         SI->getNumCases() > 0) {
195       TheOnlyDest = SI->case_begin()->getCaseSuccessor();
196     }
197 
198     bool Changed = false;
199 
200     // Figure out which case it goes to.
201     for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
202       // Found case matching a constant operand?
203       if (i->getCaseValue() == CI) {
204         TheOnlyDest = i->getCaseSuccessor();
205         break;
206       }
207 
208       // Check to see if this branch is going to the same place as the default
209       // dest.  If so, eliminate it as an explicit compare.
210       if (i->getCaseSuccessor() == DefaultDest) {
211         MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
212         unsigned NCases = SI->getNumCases();
213         // Fold the case metadata into the default if there will be any branches
214         // left, unless the metadata doesn't match the switch.
215         if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
216           // Collect branch weights into a vector.
217           SmallVector<uint32_t, 8> Weights;
218           for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
219                ++MD_i) {
220             auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
221             Weights.push_back(CI->getValue().getZExtValue());
222           }
223           // Merge weight of this case to the default weight.
224           unsigned idx = i->getCaseIndex();
225           Weights[0] += Weights[idx+1];
226           // Remove weight for this case.
227           std::swap(Weights[idx+1], Weights.back());
228           Weights.pop_back();
229           SI->setMetadata(LLVMContext::MD_prof,
230                           MDBuilder(BB->getContext()).
231                           createBranchWeights(Weights));
232         }
233         // Remove this entry.
234         BasicBlock *ParentBB = SI->getParent();
235         DefaultDest->removePredecessor(ParentBB);
236         i = SI->removeCase(i);
237         e = SI->case_end();
238         Changed = true;
239         continue;
240       }
241 
242       // Otherwise, check to see if the switch only branches to one destination.
243       // We do this by resetting "TheOnlyDest" to null when we find two non-equal
244       // destinations.
245       if (i->getCaseSuccessor() != TheOnlyDest)
246         TheOnlyDest = nullptr;
247 
248       // Increment this iterator as we haven't removed the case.
249       ++i;
250     }
251 
252     if (CI && !TheOnlyDest) {
253       // We are branching on a constant that matches none of the cases; go to
254       // the default successor.
255       TheOnlyDest = SI->getDefaultDest();
256     }
257 
258     // If we found a single destination that we can fold the switch into, do so
259     // now.
260     if (TheOnlyDest) {
261       // Insert the new branch.
262       Builder.CreateBr(TheOnlyDest);
263       BasicBlock *BB = SI->getParent();
264 
265       SmallSet<BasicBlock *, 8> RemovedSuccessors;
266 
267       // Remove entries from PHI nodes which we no longer branch to...
268       BasicBlock *SuccToKeep = TheOnlyDest;
269       for (BasicBlock *Succ : successors(SI)) {
270         if (DTU && Succ != TheOnlyDest)
271           RemovedSuccessors.insert(Succ);
272         // Is this the first edge to the destination we are keeping?
273         if (Succ == SuccToKeep) {
274           SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
275         } else {
276           Succ->removePredecessor(BB);
277         }
278       }
279 
280       // Delete the old switch.
281       Value *Cond = SI->getCondition();
282       SI->eraseFromParent();
283       if (DeleteDeadConditions)
284         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
285       if (DTU) {
286         std::vector<DominatorTree::UpdateType> Updates;
287         Updates.reserve(RemovedSuccessors.size());
288         for (auto *RemovedSuccessor : RemovedSuccessors)
289           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
290         DTU->applyUpdates(Updates);
291       }
292       return true;
293     }
294 
295     if (SI->getNumCases() == 1) {
296       // Otherwise, we can fold this switch into a conditional branch
297       // instruction if it has only one non-default destination.
298       auto FirstCase = *SI->case_begin();
299       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
300           FirstCase.getCaseValue(), "cond");
301 
302       // Insert the new branch.
303       BranchInst *NewBr = Builder.CreateCondBr(Cond,
304                                                FirstCase.getCaseSuccessor(),
305                                                SI->getDefaultDest());
306       MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
307       if (MD && MD->getNumOperands() == 3) {
308         ConstantInt *SICase =
309             mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
310         ConstantInt *SIDef =
311             mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
312         assert(SICase && SIDef);
313         // The TrueWeight should be the weight for the single case of SI.
314         NewBr->setMetadata(LLVMContext::MD_prof,
315                         MDBuilder(BB->getContext()).
316                         createBranchWeights(SICase->getValue().getZExtValue(),
317                                             SIDef->getValue().getZExtValue()));
318       }
319 
320       // Update make.implicit metadata to the newly-created conditional branch.
321       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
322       if (MakeImplicitMD)
323         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
324 
325       // Delete the old switch.
326       SI->eraseFromParent();
327       return true;
328     }
329     return Changed;
330   }
331 
332   if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
333     // indirectbr blockaddress(@F, @BB) -> br label @BB
334     if (auto *BA =
335           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
336       BasicBlock *TheOnlyDest = BA->getBasicBlock();
337       SmallSet<BasicBlock *, 8> RemovedSuccessors;
338 
339       // Insert the new branch.
340       Builder.CreateBr(TheOnlyDest);
341 
342       BasicBlock *SuccToKeep = TheOnlyDest;
343       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
344         BasicBlock *DestBB = IBI->getDestination(i);
345         if (DTU && DestBB != TheOnlyDest)
346           RemovedSuccessors.insert(DestBB);
347         if (IBI->getDestination(i) == SuccToKeep) {
348           SuccToKeep = nullptr;
349         } else {
350           DestBB->removePredecessor(BB);
351         }
352       }
353       Value *Address = IBI->getAddress();
354       IBI->eraseFromParent();
355       if (DeleteDeadConditions)
356         // Delete pointer cast instructions.
357         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
358 
359       // Also zap the blockaddress constant if there are no users remaining,
360       // otherwise the destination is still marked as having its address taken.
361       if (BA->use_empty())
362         BA->destroyConstant();
363 
364       // If we didn't find our destination in the IBI successor list, then we
365       // have undefined behavior.  Replace the unconditional branch with an
366       // 'unreachable' instruction.
367       if (SuccToKeep) {
368         BB->getTerminator()->eraseFromParent();
369         new UnreachableInst(BB->getContext(), BB);
370       }
371 
372       if (DTU) {
373         std::vector<DominatorTree::UpdateType> Updates;
374         Updates.reserve(RemovedSuccessors.size());
375         for (auto *RemovedSuccessor : RemovedSuccessors)
376           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
377         DTU->applyUpdates(Updates);
378       }
379       return true;
380     }
381   }
382 
383   return false;
384 }
385 
386 //===----------------------------------------------------------------------===//
387 //  Local dead code elimination.
388 //
389 
390 /// isInstructionTriviallyDead - Return true if the result produced by the
391 /// instruction is not used, and the instruction has no side effects.
392 ///
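/// For instance (illustrative only): an 'add' whose result has no uses is
/// trivially dead, while a store, a volatile load, or a call that may write
/// memory is not, regardless of whether its result is used.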
393 bool llvm::isInstructionTriviallyDead(Instruction *I,
394                                       const TargetLibraryInfo *TLI) {
395   if (!I->use_empty())
396     return false;
397   return wouldInstructionBeTriviallyDead(I, TLI);
398 }
399 
400 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
401                                            const TargetLibraryInfo *TLI) {
402   if (I->isTerminator())
403     return false;
404 
405   // We don't want the landingpad-like instructions removed by anything this
406   // general.
407   if (I->isEHPad())
408     return false;
409 
410   // We don't want debug info removed by anything this general, unless
411   // debug info is empty.
412   if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
413     if (DDI->getAddress())
414       return false;
415     return true;
416   }
417   if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
418     if (DVI->hasArgList() || DVI->getValue(0))
419       return false;
420     return true;
421   }
422   if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
423     if (DLI->getLabel())
424       return false;
425     return true;
426   }
427 
428   if (!I->willReturn())
429     return false;
430 
431   if (!I->mayHaveSideEffects())
432     return true;
433 
434   // Special case intrinsics that "may have side effects" but can be deleted
435   // when dead.
436   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
437     // Safe to delete llvm.stacksave and launder.invariant.group if dead.
438     if (II->getIntrinsicID() == Intrinsic::stacksave ||
439         II->getIntrinsicID() == Intrinsic::launder_invariant_group)
440       return true;
441 
442     if (II->isLifetimeStartOrEnd()) {
443       auto *Arg = II->getArgOperand(1);
444       // Lifetime intrinsics are dead when their pointer argument is undef.
445       if (isa<UndefValue>(Arg))
446         return true;
447       // If the pointer argument is an alloca, global, or argument and its only
448       // uses are lifetime intrinsics, then the intrinsics are dead.
449       if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
450         return llvm::all_of(Arg->uses(), [](Use &Use) {
451           if (IntrinsicInst *IntrinsicUse =
452                   dyn_cast<IntrinsicInst>(Use.getUser()))
453             return IntrinsicUse->isLifetimeStartOrEnd();
454           return false;
455         });
456       return false;
457     }
458 
459     // Assumptions are dead if their condition is trivially true.  Guards on
460     // true are operationally no-ops.  In the future we can consider more
461     // sophisticated tradeoffs for guards considering potential for check
462     // widening, but for now we keep things simple.
463     if ((II->getIntrinsicID() == Intrinsic::assume &&
464          isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) ||
465         II->getIntrinsicID() == Intrinsic::experimental_guard) {
466       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
467         return !Cond->isZero();
468 
469       return false;
470     }
471   }
472 
473   if (isAllocLikeFn(I, TLI))
474     return true;
475 
476   if (CallInst *CI = isFreeCall(I, TLI))
477     if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
478       return C->isNullValue() || isa<UndefValue>(C);
479 
480   if (auto *Call = dyn_cast<CallBase>(I))
481     if (isMathLibCallNoop(Call, TLI))
482       return true;
483 
484   return false;
485 }
486 
487 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
488 /// trivially dead instruction, delete it.  If that makes any of its operands
489 /// trivially dead, delete them too, recursively.  Return true if any
490 /// instructions were deleted.
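/// A typical call site, sketched here for illustration only:
///   if (isInstructionTriviallyDead(&I, TLI))
///     RecursivelyDeleteTriviallyDeadInstructions(&I, TLI, MSSAU);
/// The MemorySSAUpdater and callback parameters may be null when the caller
/// has no MemorySSA or callback to maintain.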
491 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
492     Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
493     std::function<void(Value *)> AboutToDeleteCallback) {
494   Instruction *I = dyn_cast<Instruction>(V);
495   if (!I || !isInstructionTriviallyDead(I, TLI))
496     return false;
497 
498   SmallVector<WeakTrackingVH, 16> DeadInsts;
499   DeadInsts.push_back(I);
500   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
501                                              AboutToDeleteCallback);
502 
503   return true;
504 }
505 
506 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
507     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
508     MemorySSAUpdater *MSSAU,
509     std::function<void(Value *)> AboutToDeleteCallback) {
510   unsigned S = 0, E = DeadInsts.size(), Alive = 0;
511   for (; S != E; ++S) {
512     auto *I = cast<Instruction>(DeadInsts[S]);
513     if (!isInstructionTriviallyDead(I)) {
514       DeadInsts[S] = nullptr;
515       ++Alive;
516     }
517   }
518   if (Alive == E)
519     return false;
520   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
521                                              AboutToDeleteCallback);
522   return true;
523 }
524 
525 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
526     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
527     MemorySSAUpdater *MSSAU,
528     std::function<void(Value *)> AboutToDeleteCallback) {
529   // Process the dead instruction list until empty.
530   while (!DeadInsts.empty()) {
531     Value *V = DeadInsts.pop_back_val();
532     Instruction *I = cast_or_null<Instruction>(V);
533     if (!I)
534       continue;
535     assert(isInstructionTriviallyDead(I, TLI) &&
536            "Live instruction found in dead worklist!");
537     assert(I->use_empty() && "Instructions with uses are not dead.");
538 
539     // Don't lose the debug info while deleting the instructions.
540     salvageDebugInfo(*I);
541 
542     if (AboutToDeleteCallback)
543       AboutToDeleteCallback(I);
544 
545     // Null out all of the instruction's operands to see if any operand becomes
546     // dead as we go.
547     for (Use &OpU : I->operands()) {
548       Value *OpV = OpU.get();
549       OpU.set(nullptr);
550 
551       if (!OpV->use_empty())
552         continue;
553 
554       // If the operand is an instruction that became dead as we nulled out the
555       // operand, and if it is 'trivially' dead, delete it in a future loop
556       // iteration.
557       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
558         if (isInstructionTriviallyDead(OpI, TLI))
559           DeadInsts.push_back(OpI);
560     }
561     if (MSSAU)
562       MSSAU->removeMemoryAccess(I);
563 
564     I->eraseFromParent();
565   }
566 }
567 
568 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
569   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
570   findDbgUsers(DbgUsers, I);
571   for (auto *DII : DbgUsers) {
572     Value *Undef = UndefValue::get(I->getType());
573     DII->replaceVariableLocationOp(I, Undef);
574   }
575   return !DbgUsers.empty();
576 }
577 
578 /// areAllUsesEqual - Check whether the uses of a value are all the same.
579 /// This is similar to Instruction::hasOneUse() except this will also return
580 /// true when there are no uses or multiple uses that all refer to the same
581 /// value.
582 static bool areAllUsesEqual(Instruction *I) {
583   Value::user_iterator UI = I->user_begin();
584   Value::user_iterator UE = I->user_end();
585   if (UI == UE)
586     return true;
587 
588   User *TheUse = *UI;
589   for (++UI; UI != UE; ++UI) {
590     if (*UI != TheUse)
591       return false;
592   }
593   return true;
594 }
595 
596 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
597 /// dead PHI node, due to being a def-use chain of single-use nodes that
598 /// either forms a cycle or is terminated by a trivially dead instruction,
599 /// delete it.  If that makes any of its operands trivially dead, delete them
600 /// too, recursively.  Return true if a change was made.
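/// Illustrative example: the two-instruction cycle
///   %p = phi i32 [ 0, %entry ], [ %q, %loop ]
///   %q = add i32 %p, 1        ; only used by %p
/// has no users outside the cycle, so both instructions are deleted.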
601 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
602                                         const TargetLibraryInfo *TLI,
603                                         llvm::MemorySSAUpdater *MSSAU) {
604   SmallPtrSet<Instruction*, 4> Visited;
605   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
606        I = cast<Instruction>(*I->user_begin())) {
607     if (I->use_empty())
608       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
609 
610     // If we find an instruction more than once, we're on a cycle that
611     // won't prove fruitful.
612     if (!Visited.insert(I).second) {
613       // Break the cycle and delete the instruction and its operands.
614       I->replaceAllUsesWith(UndefValue::get(I->getType()));
615       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
616       return true;
617     }
618   }
619   return false;
620 }
621 
622 static bool
623 simplifyAndDCEInstruction(Instruction *I,
624                           SmallSetVector<Instruction *, 16> &WorkList,
625                           const DataLayout &DL,
626                           const TargetLibraryInfo *TLI) {
627   if (isInstructionTriviallyDead(I, TLI)) {
628     salvageDebugInfo(*I);
629 
630     // Null out all of the instruction's operands to see if any operand becomes
631     // dead as we go.
632     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
633       Value *OpV = I->getOperand(i);
634       I->setOperand(i, nullptr);
635 
636       if (!OpV->use_empty() || I == OpV)
637         continue;
638 
639       // If the operand is an instruction that became dead as we nulled out the
640       // operand, and if it is 'trivially' dead, delete it in a future loop
641       // iteration.
642       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
643         if (isInstructionTriviallyDead(OpI, TLI))
644           WorkList.insert(OpI);
645     }
646 
647     I->eraseFromParent();
648 
649     return true;
650   }
651 
652   if (Value *SimpleV = SimplifyInstruction(I, DL)) {
653     // Add the users to the worklist. CAREFUL: an instruction can use itself,
654     // in the case of a phi node.
655     for (User *U : I->users()) {
656       if (U != I) {
657         WorkList.insert(cast<Instruction>(U));
658       }
659     }
660 
661     // Replace the instruction with its simplified value.
662     bool Changed = false;
663     if (!I->use_empty()) {
664       I->replaceAllUsesWith(SimpleV);
665       Changed = true;
666     }
667     if (isInstructionTriviallyDead(I, TLI)) {
668       I->eraseFromParent();
669       Changed = true;
670     }
671     return Changed;
672   }
673   return false;
674 }
675 
676 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
677 /// simplify any instructions in it and recursively delete dead instructions.
678 ///
679 /// This returns true if it changed the code.  Note that it can delete
680 /// instructions in other blocks as well as in this block.
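/// An illustrative call site (the surrounding names are hypothetical):
///   for (BasicBlock &BB : F)
///     Changed |= SimplifyInstructionsInBlock(&BB, &TLI);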
681 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
682                                        const TargetLibraryInfo *TLI) {
683   bool MadeChange = false;
684   const DataLayout &DL = BB->getModule()->getDataLayout();
685 
686 #ifndef NDEBUG
687   // In debug builds, ensure that the terminator of the block is never replaced
688   // or deleted by these simplifications. The idea of simplification is that it
689   // cannot introduce new instructions, and there is no way to replace the
690   // terminator of a block without introducing a new instruction.
691   AssertingVH<Instruction> TerminatorVH(&BB->back());
692 #endif
693 
694   SmallSetVector<Instruction *, 16> WorkList;
695   // Iterate over the original function, only adding insts to the worklist
696   // if they actually need to be revisited. This avoids having to pre-init
697   // the worklist with the entire function's worth of instructions.
698   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
699        BI != E;) {
700     assert(!BI->isTerminator());
701     Instruction *I = &*BI;
702     ++BI;
703 
704     // We're visiting this instruction now, so make sure it's not in the
705     // worklist from an earlier visit.
706     if (!WorkList.count(I))
707       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
708   }
709 
710   while (!WorkList.empty()) {
711     Instruction *I = WorkList.pop_back_val();
712     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
713   }
714   return MadeChange;
715 }
716 
717 //===----------------------------------------------------------------------===//
718 //  Control Flow Graph Restructuring.
719 //
720 
721 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
722                                        DomTreeUpdater *DTU) {
723 
724   // If BB has single-entry PHI nodes, fold them.
725   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
726     Value *NewVal = PN->getIncomingValue(0);
727     // Replace a self-referencing PHI with undef; it must be dead.
728     if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
729     PN->replaceAllUsesWith(NewVal);
730     PN->eraseFromParent();
731   }
732 
733   BasicBlock *PredBB = DestBB->getSinglePredecessor();
734   assert(PredBB && "Block doesn't have a single predecessor!");
735 
736   bool ReplaceEntryBB = false;
737   if (PredBB == &DestBB->getParent()->getEntryBlock())
738     ReplaceEntryBB = true;
739 
740   // DTU updates: Collect all the edges that enter
741   // PredBB. These dominator edges will be redirected to DestBB.
742   SmallVector<DominatorTree::UpdateType, 32> Updates;
743 
744   if (DTU) {
745     SmallPtrSet<BasicBlock *, 2> PredsOfPredBB(pred_begin(PredBB),
746                                                pred_end(PredBB));
747     Updates.reserve(Updates.size() + 2 * PredsOfPredBB.size() + 1);
748     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
749       // This predecessor of PredBB may already have DestBB as a successor.
750       if (PredOfPredBB != PredBB)
751         Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
752     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
753       Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
754     Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
755   }
756 
757   // Zap anything that took the address of DestBB.  Not doing this will give the
758   // address an invalid value.
759   if (DestBB->hasAddressTaken()) {
760     BlockAddress *BA = BlockAddress::get(DestBB);
761     Constant *Replacement =
762       ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
763     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
764                                                      BA->getType()));
765     BA->destroyConstant();
766   }
767 
768   // Anything that branched to PredBB now branches to DestBB.
769   PredBB->replaceAllUsesWith(DestBB);
770 
771   // Splice all the instructions from PredBB to DestBB.
772   PredBB->getTerminator()->eraseFromParent();
773   DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
774   new UnreachableInst(PredBB->getContext(), PredBB);
775 
776   // If the PredBB is the entry block of the function, move DestBB up to
777   // become the entry block after we erase PredBB.
778   if (ReplaceEntryBB)
779     DestBB->moveAfter(PredBB);
780 
781   if (DTU) {
782     assert(PredBB->getInstList().size() == 1 &&
783            isa<UnreachableInst>(PredBB->getTerminator()) &&
784            "The successor list of PredBB isn't empty before "
785            "applying corresponding DTU updates.");
786     DTU->applyUpdatesPermissive(Updates);
787     DTU->deleteBB(PredBB);
788     // Recalculation of DomTree is needed when updating a forward DomTree and
789     // the Entry BB is replaced.
790     if (ReplaceEntryBB && DTU->hasDomTree()) {
791       // The entry block was removed and there is no external interface for
792       // the dominator tree to be notified of this change. In this corner-case
793       // we recalculate the entire tree.
794       DTU->recalculate(*(DestBB->getParent()));
795     }
796   } else {
799     PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
800   }
801 }
802 
803 /// Return true if we can choose one of these values to use in place of the
804 /// other. Note that we will always choose the non-undef value to keep.
805 static bool CanMergeValues(Value *First, Value *Second) {
806   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
807 }
808 
809 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
810 /// branch to Succ, into Succ.
811 ///
812 /// Assumption: Succ is the single successor for BB.
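/// Illustrative conflict: if some block P is a predecessor of both BB and
/// Succ, and a phi in Succ receives %x from BB but a different value %y from
/// P, then folding BB into Succ would require two distinct values for the
/// single predecessor P, so the fold is rejected.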
813 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
814   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
815 
816   LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
817                     << Succ->getName() << "\n");
818   // Shortcut: if there is only a single predecessor, it must be BB and merging
819   // is always safe.
820   if (Succ->getSinglePredecessor()) return true;
821 
822   // Make a list of the predecessors of BB
823   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
824 
825   // Look at all the phi nodes in Succ, to see if they present a conflict when
826   // merging these blocks
827   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
828     PHINode *PN = cast<PHINode>(I);
829 
830     // If the incoming value from BB is again a PHINode in
831     // BB which has the same incoming value for *PI as PN does, we can
832     // merge the phi nodes and then the blocks can still be merged
833     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
834     if (BBPN && BBPN->getParent() == BB) {
835       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
836         BasicBlock *IBB = PN->getIncomingBlock(PI);
837         if (BBPreds.count(IBB) &&
838             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
839                             PN->getIncomingValue(PI))) {
840           LLVM_DEBUG(dbgs()
841                      << "Can't fold, phi node " << PN->getName() << " in "
842                      << Succ->getName() << " is conflicting with "
843                      << BBPN->getName() << " with regard to common predecessor "
844                      << IBB->getName() << "\n");
845           return false;
846         }
847       }
848     } else {
849       Value* Val = PN->getIncomingValueForBlock(BB);
850       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
851         // See if the incoming value for the common predecessor is equal to the
852         // one for BB, in which case this phi node will not prevent the merging
853         // of the block.
854         BasicBlock *IBB = PN->getIncomingBlock(PI);
855         if (BBPreds.count(IBB) &&
856             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
857           LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
858                             << " in " << Succ->getName()
859                             << " is conflicting with regard to common "
860                             << "predecessor " << IBB->getName() << "\n");
861           return false;
862         }
863       }
864     }
865   }
866 
867   return true;
868 }
869 
870 using PredBlockVector = SmallVector<BasicBlock *, 16>;
871 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
872 
873 /// Determines the value to use as the phi node input for a block.
874 ///
875 /// Select between \p OldVal and any value that we know flows from \p BB
876 /// to a particular phi on the basis of which one (if either) is not
877 /// undef. Update IncomingValues based on the selected value.
878 ///
879 /// \param OldVal The value we are considering selecting.
880 /// \param BB The block that the value flows in from.
881 /// \param IncomingValues A map from block-to-value for other phi inputs
882 /// that we have examined.
883 ///
884 /// \returns the selected value.
885 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
886                                           IncomingValueMap &IncomingValues) {
887   if (!isa<UndefValue>(OldVal)) {
888     assert((!IncomingValues.count(BB) ||
889             IncomingValues.find(BB)->second == OldVal) &&
890            "Expected OldVal to match incoming value from BB!");
891 
892     IncomingValues.insert(std::make_pair(BB, OldVal));
893     return OldVal;
894   }
895 
896   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
897   if (It != IncomingValues.end()) return It->second;
898 
899   return OldVal;
900 }
901 
902 /// Create a map from block to value for the operands of a
903 /// given phi.
904 ///
905 /// Create a map from block to value for each non-undef value flowing
906 /// into \p PN.
907 ///
908 /// \param PN The phi we are collecting the map for.
909 /// \param IncomingValues [out] The map from block to value for this phi.
910 static void gatherIncomingValuesToPhi(PHINode *PN,
911                                       IncomingValueMap &IncomingValues) {
912   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
913     BasicBlock *BB = PN->getIncomingBlock(i);
914     Value *V = PN->getIncomingValue(i);
915 
916     if (!isa<UndefValue>(V))
917       IncomingValues.insert(std::make_pair(BB, V));
918   }
919 }
920 
921 /// Replace the incoming undef values to a phi with the values
922 /// from a block-to-value map.
923 ///
924 /// \param PN The phi we are replacing the undefs in.
925 /// \param IncomingValues A map from block to value.
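///
/// Illustrative example: if \p PN has an undef incoming value for block %p and
/// \p IncomingValues maps %p to %v, that operand is rewritten to %v so that
/// all entries for %p agree.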
926 static void replaceUndefValuesInPhi(PHINode *PN,
927                                     const IncomingValueMap &IncomingValues) {
928   SmallVector<unsigned> TrueUndefOps;
929   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
930     Value *V = PN->getIncomingValue(i);
931 
932     if (!isa<UndefValue>(V)) continue;
933 
934     BasicBlock *BB = PN->getIncomingBlock(i);
935     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
936 
937     // Keep track of undef/poison incoming values. Those must match, so we fix
938     // them up below if needed.
939     // Note: this is conservatively correct, but we could try harder and group
940     // the undef values per incoming basic block.
941     if (It == IncomingValues.end()) {
942       TrueUndefOps.push_back(i);
943       continue;
944     }
945 
946     // There is a defined value for this incoming block, so map this undef
947     // incoming value to the defined value.
948     PN->setIncomingValue(i, It->second);
949   }
950 
951   // If there are both undef and poison values incoming, then convert those
952   // values to undef. It is invalid to have different values for the same
953   // incoming block.
954   unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
955     return isa<PoisonValue>(PN->getIncomingValue(i));
956   });
957   if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
958     for (unsigned i : TrueUndefOps)
959       PN->setIncomingValue(i, UndefValue::get(PN->getType()));
960   }
961 }
962 
963 /// Replace a value flowing from a block to a phi with
964 /// potentially multiple instances of that value flowing from the
965 /// block's predecessors to the phi.
966 ///
967 /// \param BB The block with the value flowing into the phi.
968 /// \param BBPreds The predecessors of BB.
969 /// \param PN The phi that we are updating.
970 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
971                                                 const PredBlockVector &BBPreds,
972                                                 PHINode *PN) {
973   Value *OldVal = PN->removeIncomingValue(BB, false);
974   assert(OldVal && "No entry in PHI for Pred BB!");
975 
976   IncomingValueMap IncomingValues;
977 
978   // We are merging two blocks - BB, and the block containing PN - and
979   // as a result we need to redirect edges from the predecessors of BB
980   // to go to the block containing PN, and update PN
981   // accordingly. Since we allow merging blocks in the case where the
982   // predecessor and successor blocks both share some predecessors,
983   // and where some of those common predecessors might have undef
984   // values flowing into PN, we want to rewrite those values to be
985   // consistent with the non-undef values.
986 
987   gatherIncomingValuesToPhi(PN, IncomingValues);
988 
989   // If this incoming value is one of the PHI nodes in BB, the new entries
990   // in the PHI node are the entries from the old PHI.
991   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
992     PHINode *OldValPN = cast<PHINode>(OldVal);
993     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
994       // Note that, since we are merging phi nodes and BB and Succ might
995       // have common predecessors, we could end up with a phi node with
996       // identical incoming branches. This will be cleaned up later (and
997       // will trigger asserts if we try to clean it up now, without also
998       // simplifying the corresponding conditional branch).
999       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1000       Value *PredVal = OldValPN->getIncomingValue(i);
1001       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
1002                                                     IncomingValues);
1003 
1004       // And add a new incoming value for this predecessor for the
1005       // newly retargeted branch.
1006       PN->addIncoming(Selected, PredBB);
1007     }
1008   } else {
1009     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1010       // Update existing incoming values in PN for this
1011       // predecessor of BB.
1012       BasicBlock *PredBB = BBPreds[i];
1013       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
1014                                                     IncomingValues);
1015 
1016       // And add a new incoming value for this predecessor for the
1017       // newly retargeted branch.
1018       PN->addIncoming(Selected, PredBB);
1019     }
1020   }
1021 
1022   replaceUndefValuesInPhi(PN, IncomingValues);
1023 }
1024 
1025 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1026                                                    DomTreeUpdater *DTU) {
1027   assert(BB != &BB->getParent()->getEntryBlock() &&
1028          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1029 
1030   // We can't eliminate infinite loops.
1031   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1032   if (BB == Succ) return false;
1033 
1034   // Check to see if merging these blocks would cause conflicts for any of the
1035   // phi nodes in BB or Succ. If not, we can safely merge.
1036   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
1037 
1038   // Check for cases where Succ has multiple predecessors and a PHI node in BB
1039   // has uses which will not disappear when the PHI nodes are merged.  It is
1040   // possible to handle such cases, but difficult: it requires checking whether
1041   // BB dominates Succ, which is non-trivial to calculate in the case where
1042   // Succ has multiple predecessors.  Also, it requires checking whether
1043   // constructing the necessary self-referential PHI node doesn't introduce any
1044   // conflicts; this isn't too difficult, but the previous code for doing this
1045   // was incorrect.
1046   //
1047   // Note that if this check finds a live use, BB dominates Succ, so BB is
1048   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1049   // folding the branch isn't profitable in that case anyway.
1050   if (!Succ->getSinglePredecessor()) {
1051     BasicBlock::iterator BBI = BB->begin();
1052     while (isa<PHINode>(*BBI)) {
1053       for (Use &U : BBI->uses()) {
1054         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1055           if (PN->getIncomingBlock(U) != BB)
1056             return false;
1057         } else {
1058           return false;
1059         }
1060       }
1061       ++BBI;
1062     }
1063   }
1064 
1065   // We cannot fold the block if it's a branch to an already present callbr
1066   // successor because that creates duplicate successors.
1067   for (BasicBlock *PredBB : predecessors(BB)) {
1068     if (auto *CBI = dyn_cast<CallBrInst>(PredBB->getTerminator())) {
1069       if (Succ == CBI->getDefaultDest())
1070         return false;
1071       for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
1072         if (Succ == CBI->getIndirectDest(i))
1073           return false;
1074     }
1075   }
1076 
1077   LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1078 
1079   SmallVector<DominatorTree::UpdateType, 32> Updates;
1080   if (DTU) {
1081     // All predecessors of BB will be moved to Succ.
1082     SmallPtrSet<BasicBlock *, 8> PredsOfBB(pred_begin(BB), pred_end(BB));
1083     SmallPtrSet<BasicBlock *, 8> PredsOfSucc(pred_begin(Succ), pred_end(Succ));
1084     Updates.reserve(Updates.size() + 2 * PredsOfBB.size() + 1);
1085     for (auto *PredOfBB : PredsOfBB)
1086       // This predecessor of BB may already have Succ as a successor.
1087       if (!PredsOfSucc.contains(PredOfBB))
1088         Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1089     for (auto *PredOfBB : PredsOfBB)
1090       Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1091     Updates.push_back({DominatorTree::Delete, BB, Succ});
1092   }
1093 
1094   if (isa<PHINode>(Succ->begin())) {
1095     // If there is more than one pred of succ, and there are PHI nodes in
1096     // the successor, then we need to add incoming edges for the PHI nodes
1097     //
1098     const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
1099 
1100     // Loop over all of the PHI nodes in the successor of BB.
1101     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1102       PHINode *PN = cast<PHINode>(I);
1103 
1104       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1105     }
1106   }
1107 
1108   if (Succ->getSinglePredecessor()) {
1109     // BB is the only predecessor of Succ, so Succ will end up with exactly
1110     // the same predecessors BB had.
1111 
1112     // Copy over any phi, debug or lifetime instruction.
1113     BB->getTerminator()->eraseFromParent();
1114     Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1115                                BB->getInstList());
1116   } else {
1117     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1118       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1119       assert(PN->use_empty() && "There shouldn't be any uses here!");
1120       PN->eraseFromParent();
1121     }
1122   }
1123 
1124   // If the unconditional branch we replaced contains llvm.loop metadata, we
1125   // add the metadata to the branch instructions in the predecessors.
1126   unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1127   Instruction *TI = BB->getTerminator();
1128   if (TI)
1129     if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1130       for (BasicBlock *Pred : predecessors(BB))
1131         Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1132 
1133   // For AutoFDO, since BB is going to be removed, we won't be able to sample
1134   // it. To avoid assigning a zero weight for BB, move all its pseudo probes
1135   // into Succ and mark them dangling. This should give the counts inference a
1136   // chance to compute a more reasonable weight for BB.
1137   moveAndDanglePseudoProbes(BB, &*Succ->getFirstInsertionPt());
1138 
1139   // Everything that jumped to BB now goes to Succ.
1140   BB->replaceAllUsesWith(Succ);
1141   if (!Succ->hasName()) Succ->takeName(BB);
1142 
1143   // Clear the successor list of BB to match the DTU updates applied below.
1144   if (BB->getTerminator())
1145     BB->getInstList().pop_back();
1146   new UnreachableInst(BB->getContext(), BB);
1147   assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1148                            "applying corresponding DTU updates.");
1149 
1150   if (DTU) {
1151     DTU->applyUpdates(Updates);
1152     DTU->deleteBB(BB);
1153   } else {
1154     BB->eraseFromParent(); // Delete the old basic block.
1155   }
1156   return true;
1157 }
1158 
1159 static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) {
1160   // This implementation doesn't currently consider undef operands
1161   // specially. Theoretically, two phis which are identical except for
1162   // one having an undef where the other doesn't could be collapsed.
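  // Illustrative example: two phis such as
  //   %a = phi i32 [ %x, %p ], [ %y, %q ]
  //   %b = phi i32 [ %x, %p ], [ %y, %q ]
  // are duplicates; the later one is RAUW'd with the earlier one and erased.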
1163 
1164   bool Changed = false;
1165 
1166   // Examine each PHI.
1167   // Note that increment of I must *NOT* be in the iteration_expression, since
1168   // we don't want to immediately advance when we restart from the beginning.
1169   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1170     ++I;
1171     // Is there an identical PHI node in this basic block?
1172     // Note that we only look at the PHI nodes following PN (the upper triangle
1173     // of the pairwise comparison); earlier pairs were already checked.
1174     for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1175       if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1176         continue;
1177       // A duplicate. Replace this PHI with the base PHI.
1178       ++NumPHICSEs;
1179       DuplicatePN->replaceAllUsesWith(PN);
1180       DuplicatePN->eraseFromParent();
1181       Changed = true;
1182 
1183       // The RAUW can change PHIs that we already visited.
1184       I = BB->begin();
1185       break; // Start over from the beginning.
1186     }
1187   }
1188   return Changed;
1189 }
1190 
1191 static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) {
1192   // This implementation doesn't currently consider undef operands
1193   // specially. Theoretically, two phis which are identical except for
1194   // one having an undef where the other doesn't could be collapsed.
1195 
1196   struct PHIDenseMapInfo {
1197     static PHINode *getEmptyKey() {
1198       return DenseMapInfo<PHINode *>::getEmptyKey();
1199     }
1200 
1201     static PHINode *getTombstoneKey() {
1202       return DenseMapInfo<PHINode *>::getTombstoneKey();
1203     }
1204 
1205     static bool isSentinel(PHINode *PN) {
1206       return PN == getEmptyKey() || PN == getTombstoneKey();
1207     }
1208 
1209     // WARNING: this logic must be kept in sync with
1210     //          Instruction::isIdenticalToWhenDefined()!
1211     static unsigned getHashValueImpl(PHINode *PN) {
1212       // Compute a hash value on the operands. Instcombine will likely have
1213       // sorted them, which helps expose duplicates, but we have to check all
1214       // the operands to be safe in case instcombine hasn't run.
1215       return static_cast<unsigned>(hash_combine(
1216           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1217           hash_combine_range(PN->block_begin(), PN->block_end())));
1218     }
1219 
1220     static unsigned getHashValue(PHINode *PN) {
1221 #ifndef NDEBUG
1222       // If -phicse-debug-hash was specified, return a constant -- this
1223       // will force all hashing to collide, so we'll exhaustively search
1224       // the table for a match, and the assertion in isEqual will fire if
1225       // there's a bug causing equal keys to hash differently.
1226       if (PHICSEDebugHash)
1227         return 0;
1228 #endif
1229       return getHashValueImpl(PN);
1230     }
1231 
1232     static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1233       if (isSentinel(LHS) || isSentinel(RHS))
1234         return LHS == RHS;
1235       return LHS->isIdenticalTo(RHS);
1236     }
1237 
1238     static bool isEqual(PHINode *LHS, PHINode *RHS) {
1239       // These comparisons are nontrivial, so assert that equality implies
1240       // hash equality (DenseMap demands this as an invariant).
1241       bool Result = isEqualImpl(LHS, RHS);
1242       assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1243              getHashValueImpl(LHS) == getHashValueImpl(RHS));
1244       return Result;
1245     }
1246   };
1247 
1248   // Set of unique PHINodes.
1249   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1250   PHISet.reserve(4 * PHICSENumPHISmallSize);
1251 
1252   // Examine each PHI.
1253   bool Changed = false;
1254   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1255     auto Inserted = PHISet.insert(PN);
1256     if (!Inserted.second) {
1257       // A duplicate. Replace this PHI with its duplicate.
1258       ++NumPHICSEs;
1259       PN->replaceAllUsesWith(*Inserted.first);
1260       PN->eraseFromParent();
1261       Changed = true;
1262 
1263       // The RAUW can change PHIs that we already visited. Start over from the
1264       // beginning.
1265       PHISet.clear();
1266       I = BB->begin();
1267     }
1268   }
1269 
1270   return Changed;
1271 }
1272 
1273 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1274   if (
1275 #ifndef NDEBUG
1276       !PHICSEDebugHash &&
1277 #endif
1278       hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1279     return EliminateDuplicatePHINodesNaiveImpl(BB);
1280   return EliminateDuplicatePHINodesSetBasedImpl(BB);
1281 }
1282 
1283 /// If the specified pointer points to an object that we control, try to modify
1284 /// the object's alignment to PrefAlign. Returns a minimum known alignment of
1285 /// the value after the operation, which may be lower than PrefAlign.
1286 ///
1287 /// Increasing value alignment isn't often possible, though. If alignment is
1288 /// important, a more reliable approach is to simply align all global variables
1289 /// and allocation instructions to their preferred alignment from the beginning.
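/// Illustrative behaviour: for an 'alloca i32, align 4' with PrefAlign = 16,
/// the alloca's alignment is raised to 16 and 16 is returned, unless 16
/// exceeds the natural stack alignment; for a pointer we do not control,
/// Align(1) is returned and nothing is changed.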
1290 static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1291                                  const DataLayout &DL) {
1292   V = V->stripPointerCasts();
1293 
1294   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1295     // TODO: Ideally, this function would not be called if PrefAlign is smaller
1296     // than the current alignment, as the known bits calculation should have
1297     // already taken it into account. However, this is not always the case,
1298     // as computeKnownBits() has a depth limit, while stripPointerCasts()
1299     // doesn't.
1300     Align CurrentAlign = AI->getAlign();
1301     if (PrefAlign <= CurrentAlign)
1302       return CurrentAlign;
1303 
1304     // If the preferred alignment is greater than the natural stack alignment
1305     // then don't round up. This avoids dynamic stack realignment.
1306     if (DL.exceedsNaturalStackAlignment(PrefAlign))
1307       return CurrentAlign;
1308     AI->setAlignment(PrefAlign);
1309     return PrefAlign;
1310   }
1311 
1312   if (auto *GO = dyn_cast<GlobalObject>(V)) {
1313     // TODO: as above, this shouldn't be necessary.
1314     Align CurrentAlign = GO->getPointerAlignment(DL);
1315     if (PrefAlign <= CurrentAlign)
1316       return CurrentAlign;
1317 
1318     // If there is a large requested alignment and we can, bump up the alignment
1319     // of the global.  If the memory we set aside for the global may not be the
1320     // memory used by the final program then it is impossible for us to reliably
1321     // enforce the preferred alignment.
1322     if (!GO->canIncreaseAlignment())
1323       return CurrentAlign;
1324 
1325     GO->setAlignment(PrefAlign);
1326     return PrefAlign;
1327   }
1328 
1329   return Align(1);
1330 }
1331 
1332 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1333                                        const DataLayout &DL,
1334                                        const Instruction *CxtI,
1335                                        AssumptionCache *AC,
1336                                        const DominatorTree *DT) {
1337   assert(V->getType()->isPointerTy() &&
1338          "getOrEnforceKnownAlignment expects a pointer!");
1339 
1340   KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1341   unsigned TrailZ = Known.countMinTrailingZeros();
1342 
1343   // Avoid trouble with ridiculously large TrailZ values, such as
1344   // those computed from a null pointer.
1345   // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1346   TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1347 
1348   Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1349 
1350   if (PrefAlign && *PrefAlign > Alignment)
1351     Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1352 
  // Return the best alignment we were able to compute or enforce.
1354   return Alignment;
1355 }
1356 
1357 ///===---------------------------------------------------------------------===//
1358 ///  Dbg Intrinsic utilities
1359 ///
1360 
1361 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1362 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1363                              DIExpression *DIExpr,
1364                              PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
1366   // is removed by LowerDbgDeclare(), we need to make sure that we are
1367   // not inserting the same dbg.value intrinsic over and over.
1368   SmallVector<DbgValueInst *, 1> DbgValues;
1369   findDbgValues(DbgValues, APN);
1370   for (auto *DVI : DbgValues) {
1371     assert(is_contained(DVI->getValues(), APN));
1372     if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1373       return true;
1374   }
1375   return false;
1376 }
1377 
1378 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1379 /// (or fragment of the variable) described by \p DII.
1380 ///
1381 /// This is primarily intended as a helper for the different
1382 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1383 /// converted describes an alloca'd variable, so we need to use the
1384 /// alloc size of the value when doing the comparison. E.g. an i1 value will be
1385 /// identified as covering an n-bit fragment, if the store size of i1 is at
1386 /// least n bits.
1387 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1388   const DataLayout &DL = DII->getModule()->getDataLayout();
1389   TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1390   if (Optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
1391     assert(!ValueSize.isScalable() &&
1392            "Fragments don't work on scalable types.");
1393     return ValueSize.getFixedSize() >= *FragmentSize;
1394   }
1395   // We can't always calculate the size of the DI variable (e.g. if it is a
1396   // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
1398   if (DII->isAddressOfVariable()) {
1399     // DII should have exactly 1 location when it is an address.
1400     assert(DII->getNumVariableLocationOps() == 1 &&
1401            "address of variable must have exactly 1 location operand.");
1402     if (auto *AI =
1403             dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1404       if (Optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1405         assert(ValueSize.isScalable() == FragmentSize->isScalable() &&
1406                "Both sizes should agree on the scalable flag.");
1407         return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1408       }
1409     }
1410   }
1411   // Could not determine size of variable. Conservatively return false.
1412   return false;
1413 }
1414 
/// Produce a DebugLoc to use for each dbg.declare/inst pair that is promoted
/// to a dbg.value. Because no machine insts can come from debug intrinsics,
/// only the scope and inlinedAt are significant. Zero line numbers are used in
1418 /// case this DebugLoc leaks into any adjacent instructions.
1419 static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
1420   // Original dbg.declare must have a location.
1421   DebugLoc DeclareLoc = DII->getDebugLoc();
1422   MDNode *Scope = DeclareLoc.getScope();
1423   DILocation *InlinedAt = DeclareLoc.getInlinedAt();
1424   // Produce an unknown location with the correct scope / inlinedAt fields.
1425   return DILocation::get(DII->getContext(), 0, 0, Scope, InlinedAt);
1426 }
1427 
1428 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1429 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1430 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1431                                            StoreInst *SI, DIBuilder &Builder) {
1432   assert(DII->isAddressOfVariable());
1433   auto *DIVar = DII->getVariable();
1434   assert(DIVar && "Missing variable");
1435   auto *DIExpr = DII->getExpression();
1436   Value *DV = SI->getValueOperand();
1437 
1438   DebugLoc NewLoc = getDebugValueLoc(DII, SI);
1439 
1440   if (!valueCoversEntireFragment(DV->getType(), DII)) {
1441     // FIXME: If storing to a part of the variable described by the dbg.declare,
1442     // then we want to insert a dbg.value for the corresponding fragment.
1443     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1444                       << *DII << '\n');
1445     // For now, when there is a store to parts of the variable (but we do not
    // know which part) we insert a dbg.value intrinsic to indicate that we
1447     // know nothing about the variable's content.
1448     DV = UndefValue::get(DV->getType());
1449     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1450     return;
1451   }
1452 
1453   Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1454 }
1455 
1456 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1457 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1458 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1459                                            LoadInst *LI, DIBuilder &Builder) {
1460   auto *DIVar = DII->getVariable();
1461   auto *DIExpr = DII->getExpression();
1462   assert(DIVar && "Missing variable");
1463 
1464   if (!valueCoversEntireFragment(LI->getType(), DII)) {
1465     // FIXME: If only referring to a part of the variable described by the
1466     // dbg.declare, then we want to insert a dbg.value for the corresponding
1467     // fragment.
1468     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1469                       << *DII << '\n');
1470     return;
1471   }
1472 
1473   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1474 
1475   // We are now tracking the loaded value instead of the address. In the
1476   // future if multi-location support is added to the IR, it might be
1477   // preferable to keep tracking both the loaded value and the original
1478   // address in case the alloca can not be elided.
1479   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1480       LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
1481   DbgValue->insertAfter(LI);
1482 }
1483 
1484 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1485 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1486 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1487                                            PHINode *APN, DIBuilder &Builder) {
1488   auto *DIVar = DII->getVariable();
1489   auto *DIExpr = DII->getExpression();
1490   assert(DIVar && "Missing variable");
1491 
1492   if (PhiHasDebugValue(DIVar, DIExpr, APN))
1493     return;
1494 
1495   if (!valueCoversEntireFragment(APN->getType(), DII)) {
1496     // FIXME: If only referring to a part of the variable described by the
1497     // dbg.declare, then we want to insert a dbg.value for the corresponding
1498     // fragment.
1499     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1500                       << *DII << '\n');
1501     return;
1502   }
1503 
1504   BasicBlock *BB = APN->getParent();
1505   auto InsertionPt = BB->getFirstInsertionPt();
1506 
1507   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1508 
1509   // The block may be a catchswitch block, which does not have a valid
1510   // insertion point.
1511   // FIXME: Insert dbg.value markers in the successors when appropriate.
1512   if (InsertionPt != BB->end())
1513     Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
1514 }
1515 
1516 /// Determine whether this alloca is either a VLA or an array.
1517 static bool isArray(AllocaInst *AI) {
1518   return AI->isArrayAllocation() ||
1519          (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1520 }
1521 
1522 /// Determine whether this alloca is a structure.
1523 static bool isStructure(AllocaInst *AI) {
1524   return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1525 }
1526 
/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
/// set of llvm.dbg.value intrinsics.
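///
/// For example, for a scalar i32 alloca the dbg.declare is removed and a
/// dbg.value describing the accessed value is inserted at each store to and
/// load from the alloca.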
1529 bool llvm::LowerDbgDeclare(Function &F) {
1530   bool Changed = false;
1531   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1532   SmallVector<DbgDeclareInst *, 4> Dbgs;
1533   for (auto &FI : F)
1534     for (Instruction &BI : FI)
1535       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1536         Dbgs.push_back(DDI);
1537 
1538   if (Dbgs.empty())
1539     return Changed;
1540 
1541   for (auto &I : Dbgs) {
1542     DbgDeclareInst *DDI = I;
1543     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1544     // If this is an alloca for a scalar variable, insert a dbg.value
1545     // at each load and store to the alloca and erase the dbg.declare.
1546     // The dbg.values allow tracking a variable even if it is not
1547     // stored on the stack, while the dbg.declare can only describe
1548     // the stack slot (and at a lexical-scope granularity). Later
1549     // passes will attempt to elide the stack slot.
1550     if (!AI || isArray(AI) || isStructure(AI))
1551       continue;
1552 
1553     // A volatile load/store means that the alloca can't be elided anyway.
1554     if (llvm::any_of(AI->users(), [](User *U) -> bool {
1555           if (LoadInst *LI = dyn_cast<LoadInst>(U))
1556             return LI->isVolatile();
1557           if (StoreInst *SI = dyn_cast<StoreInst>(U))
1558             return SI->isVolatile();
1559           return false;
1560         }))
1561       continue;
1562 
1563     SmallVector<const Value *, 8> WorkList;
1564     WorkList.push_back(AI);
1565     while (!WorkList.empty()) {
1566       const Value *V = WorkList.pop_back_val();
1567       for (auto &AIUse : V->uses()) {
1568         User *U = AIUse.getUser();
1569         if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1570           if (AIUse.getOperandNo() == 1)
1571             ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1572         } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1573           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1574         } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1575           // This is a call by-value or some other instruction that takes a
1576           // pointer to the variable. Insert a *value* intrinsic that describes
1577           // the variable by dereferencing the alloca.
1578           if (!CI->isLifetimeStartOrEnd()) {
1579             DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr);
1580             auto *DerefExpr =
1581                 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1582             DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1583                                         NewLoc, CI);
1584           }
1585         } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1586           if (BI->getType()->isPointerTy())
1587             WorkList.push_back(BI);
1588         }
1589       }
1590     }
1591     DDI->eraseFromParent();
1592     Changed = true;
1593   }
1594 
  if (Changed)
    for (BasicBlock &BB : F)
      RemoveRedundantDbgInstrs(&BB);
1598 
1599   return Changed;
1600 }
1601 
1602 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
1603 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1604                                     SmallVectorImpl<PHINode *> &InsertedPHIs) {
1605   assert(BB && "No BasicBlock to clone dbg.value(s) from.");
1606   if (InsertedPHIs.size() == 0)
1607     return;
1608 
1609   // Map existing PHI nodes to their dbg.values.
1610   ValueToValueMapTy DbgValueMap;
1611   for (auto &I : *BB) {
1612     if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1613       for (Value *V : DbgII->location_ops())
1614         if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1615           DbgValueMap.insert({Loc, DbgII});
1616     }
1617   }
1618   if (DbgValueMap.size() == 0)
1619     return;
1620 
1621   // Map a pair of the destination BB and old dbg.value to the new dbg.value,
1622   // so that if a dbg.value is being rewritten to use more than one of the
1623   // inserted PHIs in the same destination BB, we can update the same dbg.value
1624   // with all the new PHIs instead of creating one copy for each.
1625   MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
1626             DbgVariableIntrinsic *>
1627       NewDbgValueMap;
1628   // Then iterate through the new PHIs and look to see if they use one of the
1629   // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
1630   // propagate the info through the new PHI. If we use more than one new PHI in
1631   // a single destination BB with the same old dbg.value, merge the updates so
1632   // that we get a single new dbg.value with all the new PHIs.
1633   for (auto PHI : InsertedPHIs) {
1634     BasicBlock *Parent = PHI->getParent();
1635     // Avoid inserting an intrinsic into an EH block.
1636     if (Parent->getFirstNonPHI()->isEHPad())
1637       continue;
1638     for (auto VI : PHI->operand_values()) {
1639       auto V = DbgValueMap.find(VI);
1640       if (V != DbgValueMap.end()) {
1641         auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1642         auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1643         if (NewDI == NewDbgValueMap.end()) {
1644           auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
1645           NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1646         }
1647         DbgVariableIntrinsic *NewDbgII = NewDI->second;
        // If PHI contains VI as an operand more than once, we may have already
        // replaced it in NewDbgII; confirm that it is still present.
1650         if (is_contained(NewDbgII->location_ops(), VI))
1651           NewDbgII->replaceVariableLocationOp(VI, PHI);
1652       }
1653     }
1654   }
  // Insert the new dbg.values into their destination blocks.
1656   for (auto DI : NewDbgValueMap) {
1657     BasicBlock *Parent = DI.first.first;
1658     auto *NewDbgII = DI.second;
1659     auto InsertionPt = Parent->getFirstInsertionPt();
1660     assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1661     NewDbgII->insertBefore(&*InsertionPt);
1662   }
1663 }
1664 
1665 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1666                              DIBuilder &Builder, uint8_t DIExprFlags,
1667                              int Offset) {
1668   auto DbgAddrs = FindDbgAddrUses(Address);
1669   for (DbgVariableIntrinsic *DII : DbgAddrs) {
1670     DebugLoc Loc = DII->getDebugLoc();
1671     auto *DIVar = DII->getVariable();
1672     auto *DIExpr = DII->getExpression();
1673     assert(DIVar && "Missing variable");
1674     DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1675     // Insert llvm.dbg.declare immediately before DII, and remove old
1676     // llvm.dbg.declare.
1677     Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
1678     DII->eraseFromParent();
1679   }
1680   return !DbgAddrs.empty();
1681 }
1682 
1683 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1684                                         DIBuilder &Builder, int Offset) {
1685   DebugLoc Loc = DVI->getDebugLoc();
1686   auto *DIVar = DVI->getVariable();
1687   auto *DIExpr = DVI->getExpression();
1688   assert(DIVar && "Missing variable");
1689 
1690   // This is an alloca-based llvm.dbg.value. The first thing it should do with
1691   // the alloca pointer is dereference it. Otherwise we don't know how to handle
1692   // it and give up.
1693   if (!DIExpr || DIExpr->getNumElements() < 1 ||
1694       DIExpr->getElement(0) != dwarf::DW_OP_deref)
1695     return;
1696 
1697   // Insert the offset before the first deref.
1698   // We could just change the offset argument of dbg.value, but it's unsigned...
1699   if (Offset)
1700     DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1701 
1702   Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1703   DVI->eraseFromParent();
1704 }
1705 
1706 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1707                                     DIBuilder &Builder, int Offset) {
1708   if (auto *L = LocalAsMetadata::getIfExists(AI))
1709     if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1710       for (Use &U : llvm::make_early_inc_range(MDV->uses()))
1711         if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1712           replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1713 }
1714 
/// Salvage debug information for \p I where possible by rewriting its debug
/// users; if salvaging is not possible, their uses of \p I are set to undef.
1717 void llvm::salvageDebugInfo(Instruction &I) {
1718   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1719   findDbgUsers(DbgUsers, &I);
1720   salvageDebugInfoForDbgValues(I, DbgUsers);
1721 }
1722 
1723 void llvm::salvageDebugInfoForDbgValues(
1724     Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
1725   bool Salvaged = false;
1726 
1727   for (auto *DII : DbgUsers) {
1728     // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1729     // are implicitly pointing out the value as a DWARF memory location
1730     // description.
1731     bool StackValue = isa<DbgValueInst>(DII);
1732     auto DIILocation = DII->location_ops();
1733     assert(
1734         is_contained(DIILocation, &I) &&
1735         "DbgVariableIntrinsic must use salvaged instruction as its location");
1736     unsigned LocNo = std::distance(DIILocation.begin(), find(DIILocation, &I));
1737     SmallVector<Value *, 4> AdditionalValues;
1738     DIExpression *SalvagedExpr = salvageDebugInfoImpl(
1739         I, DII->getExpression(), StackValue, LocNo, AdditionalValues);
1740 
    // salvageDebugInfoImpl should either fail for the first element of
    // DbgUsers or succeed for all of them; bail out on the first failure.
1743     if (!SalvagedExpr)
1744       break;
1745 
1746     DII->replaceVariableLocationOp(&I, I.getOperand(0));
1747     if (AdditionalValues.empty()) {
1748       DII->setExpression(SalvagedExpr);
1749     } else if (isa<DbgValueInst>(DII)) {
1750       DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
1751     } else {
1752       // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
1753       // currently only valid for stack value expressions.
1754       Value *Undef = UndefValue::get(I.getOperand(0)->getType());
1755       DII->replaceVariableLocationOp(I.getOperand(0), Undef);
1756     }
1757     LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1758     Salvaged = true;
1759   }
1760 
1761   if (Salvaged)
1762     return;
1763 
1764   for (auto *DII : DbgUsers) {
1765     Value *Undef = UndefValue::get(I.getType());
1766     DII->replaceVariableLocationOp(&I, Undef);
1767   }
1768 }
1769 
1770 bool getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
1771                          uint64_t CurrentLocOps,
1772                          SmallVectorImpl<uint64_t> &Opcodes,
1773                          SmallVectorImpl<Value *> &AdditionalValues) {
1774   unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
1775   // Rewrite a GEP into a DIExpression.
1776   SmallDenseMap<Value *, APInt, 8> VariableOffsets;
1777   APInt ConstantOffset(BitWidth, 0);
1778   if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
1779     return false;
1780   if (!VariableOffsets.empty() && !CurrentLocOps) {
1781     Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
1782     CurrentLocOps = 1;
1783   }
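  // Each variable index contributes "index * scale" to the offset: the index
  // value becomes an extra location operand (DW_OP_LLVM_arg N), which is then
  // multiplied by its constant scale and added to the running expression.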
1784   for (auto Offset : VariableOffsets) {
1785     AdditionalValues.push_back(Offset.first);
1786     assert(Offset.second.isStrictlyPositive() &&
1787            "Expected strictly positive multiplier for offset.");
1788     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
1789                     Offset.second.getZExtValue(), dwarf::DW_OP_mul,
1790                     dwarf::DW_OP_plus});
1791   }
1792   DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
1793   return true;
1794 }
1795 
1796 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
1797   switch (Opcode) {
1798   case Instruction::Add:
1799     return dwarf::DW_OP_plus;
1800   case Instruction::Sub:
1801     return dwarf::DW_OP_minus;
1802   case Instruction::Mul:
1803     return dwarf::DW_OP_mul;
1804   case Instruction::SDiv:
1805     return dwarf::DW_OP_div;
1806   case Instruction::SRem:
1807     return dwarf::DW_OP_mod;
1808   case Instruction::Or:
1809     return dwarf::DW_OP_or;
1810   case Instruction::And:
1811     return dwarf::DW_OP_and;
1812   case Instruction::Xor:
1813     return dwarf::DW_OP_xor;
1814   case Instruction::Shl:
1815     return dwarf::DW_OP_shl;
1816   case Instruction::LShr:
1817     return dwarf::DW_OP_shr;
1818   case Instruction::AShr:
1819     return dwarf::DW_OP_shra;
1820   default:
1821     // TODO: Salvage from each kind of binop we know about.
1822     return 0;
1823   }
1824 }
1825 
1826 bool getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
1827                            SmallVectorImpl<uint64_t> &Opcodes,
1828                            SmallVectorImpl<Value *> &AdditionalValues) {
1829   // Handle binary operations with constant integer operands as a special case.
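  // For example, `add i64 %x, 16` salvages to the existing expression plus a
  // constant offset of 16, while `mul i64 %x, %y` pushes %y as an additional
  // location operand and appends DW_OP_LLVM_arg / DW_OP_mul.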
1830   auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
1831   // Values wider than 64 bits cannot be represented within a DIExpression.
1832   if (ConstInt && ConstInt->getBitWidth() > 64)
1833     return false;
1834 
1835   Instruction::BinaryOps BinOpcode = BI->getOpcode();
1836   // Push any Constant Int operand onto the expression stack.
1837   if (ConstInt) {
1838     uint64_t Val = ConstInt->getSExtValue();
1839     // Add or Sub Instructions with a constant operand can potentially be
1840     // simplified.
1841     if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
1842       uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
1843       DIExpression::appendOffset(Opcodes, Offset);
1844       return true;
1845     }
1846     Opcodes.append({dwarf::DW_OP_constu, Val});
1847   } else {
1848     if (!CurrentLocOps) {
1849       Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
1850       CurrentLocOps = 1;
1851     }
1852     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
1853     AdditionalValues.push_back(BI->getOperand(1));
1854   }
1855 
1856   // Add salvaged binary operator to expression stack, if it has a valid
1857   // representation in a DIExpression.
1858   uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
1859   if (!DwarfBinOp)
1860     return false;
1861   Opcodes.push_back(DwarfBinOp);
1862 
1863   return true;
1864 }
1865 
1866 DIExpression *
1867 llvm::salvageDebugInfoImpl(Instruction &I, DIExpression *SrcDIExpr,
1868                            bool WithStackValue, unsigned LocNo,
1869                            SmallVectorImpl<Value *> &AdditionalValues) {
1870   uint64_t CurrentLocOps = SrcDIExpr->getNumLocationOperands();
1871   auto &M = *I.getModule();
1872   auto &DL = M.getDataLayout();
1873 
1874   // Apply a vector of opcodes to the source DIExpression.
1875   auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * {
1876     DIExpression *DIExpr = SrcDIExpr;
1877     if (!Ops.empty()) {
1878       DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, LocNo, WithStackValue);
1879     }
1880     return DIExpr;
1881   };
1882 
  // Initializer-list helper for applying operators to the source DIExpression.
1884   auto applyOps = [&](ArrayRef<uint64_t> Opcodes) {
1885     SmallVector<uint64_t, 8> Ops(Opcodes.begin(), Opcodes.end());
1886     return doSalvage(Ops);
1887   };
1888 
1889   if (auto *CI = dyn_cast<CastInst>(&I)) {
1890     // No-op casts are irrelevant for debug info.
1891     if (CI->isNoopCast(DL))
1892       return SrcDIExpr;
1893 
1894     Type *Type = CI->getType();
1895     // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
1896     if (Type->isVectorTy() ||
1897         !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I)))
1898       return nullptr;
1899 
1900     Value *FromValue = CI->getOperand(0);
1901     unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits();
1902     unsigned ToTypeBitSize = Type->getScalarSizeInBits();
1903 
1904     return applyOps(DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
1905                                             isa<SExtInst>(&I)));
1906   }
1907 
1908   SmallVector<uint64_t, 8> Ops;
1909   if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1910     if (getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues))
1911       return doSalvage(Ops);
1912   } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1913     if (getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues))
1914       return doSalvage(Ops);
1915   }
1916   // *Not* to do: we should not attempt to salvage load instructions,
1917   // because the validity and lifetime of a dbg.value containing
1918   // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
1919   return nullptr;
1920 }
1921 
1922 /// A replacement for a dbg.value expression.
1923 using DbgValReplacement = Optional<DIExpression *>;
1924 
1925 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1926 /// possibly moving/undefing users to prevent use-before-def. Returns true if
1927 /// changes are made.
1928 static bool rewriteDebugUsers(
1929     Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1930     function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
1931   // Find debug users of From.
1932   SmallVector<DbgVariableIntrinsic *, 1> Users;
1933   findDbgUsers(Users, &From);
1934   if (Users.empty())
1935     return false;
1936 
1937   // Prevent use-before-def of To.
1938   bool Changed = false;
1939   SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
1940   if (isa<Instruction>(&To)) {
1941     bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1942 
1943     for (auto *DII : Users) {
1944       // It's common to see a debug user between From and DomPoint. Move it
1945       // after DomPoint to preserve the variable update without any reordering.
1946       if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1947         LLVM_DEBUG(dbgs() << "MOVE:  " << *DII << '\n');
1948         DII->moveAfter(&DomPoint);
1949         Changed = true;
1950 
1951       // Users which otherwise aren't dominated by the replacement value must
1952       // be salvaged or deleted.
1953       } else if (!DT.dominates(&DomPoint, DII)) {
1954         UndefOrSalvage.insert(DII);
1955       }
1956     }
1957   }
1958 
1959   // Update debug users without use-before-def risk.
1960   for (auto *DII : Users) {
1961     if (UndefOrSalvage.count(DII))
1962       continue;
1963 
1964     DbgValReplacement DVR = RewriteExpr(*DII);
1965     if (!DVR)
1966       continue;
1967 
1968     DII->replaceVariableLocationOp(&From, &To);
1969     DII->setExpression(*DVR);
1970     LLVM_DEBUG(dbgs() << "REWRITE:  " << *DII << '\n');
1971     Changed = true;
1972   }
1973 
1974   if (!UndefOrSalvage.empty()) {
1975     // Try to salvage the remaining debug users.
1976     salvageDebugInfo(From);
1977     Changed = true;
1978   }
1979 
1980   return Changed;
1981 }
1982 
1983 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
1984 /// losslessly preserve the bits and semantics of the value. This predicate is
/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
1986 ///
1987 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
/// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
1989 /// and also does not allow lossless pointer <-> integer conversions.
1990 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
1991                                          Type *ToTy) {
1992   // Trivially compatible types.
1993   if (FromTy == ToTy)
1994     return true;
1995 
1996   // Handle compatible pointer <-> integer conversions.
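  // E.g. an i64 <-> pointer conversion is lossless when pointers in the
  // relevant address space are 64 bits wide and not marked non-integral in the
  // DataLayout.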
1997   if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
1998     bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
1999     bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2000                               !DL.isNonIntegralPointerType(ToTy);
2001     return SameSize && LosslessConversion;
2002   }
2003 
2004   // TODO: This is not exhaustive.
2005   return false;
2006 }
2007 
2008 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2009                                  Instruction &DomPoint, DominatorTree &DT) {
2010   // Exit early if From has no debug users.
2011   if (!From.isUsedByMetadata())
2012     return false;
2013 
2014   assert(&From != &To && "Can't replace something with itself");
2015 
2016   Type *FromTy = From.getType();
2017   Type *ToTy = To.getType();
2018 
2019   auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2020     return DII.getExpression();
2021   };
2022 
2023   // Handle no-op conversions.
2024   Module &M = *From.getModule();
2025   const DataLayout &DL = M.getDataLayout();
2026   if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2027     return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2028 
2029   // Handle integer-to-integer widening and narrowing.
2030   // FIXME: Use DW_OP_convert when it's available everywhere.
2031   if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2032     uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2033     uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2034     assert(FromBits != ToBits && "Unexpected no-op conversion");
2035 
2036     // When the width of the result grows, assume that a debugger will only
2037     // access the low `FromBits` bits when inspecting the source variable.
2038     if (FromBits < ToBits)
2039       return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2040 
2041     // The width of the result has shrunk. Use sign/zero extension to describe
2042     // the source variable's high bits.
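    // E.g. if an i64 variable is now described by an i32 value, the rewritten
    // expression sign- or zero-extends that i32 back to 64 bits, depending on
    // the variable's signedness.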
2043     auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2044       DILocalVariable *Var = DII.getVariable();
2045 
2046       // Without knowing signedness, sign/zero extension isn't possible.
2047       auto Signedness = Var->getSignedness();
2048       if (!Signedness)
2049         return None;
2050 
2051       bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2052       return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2053                                      Signed);
2054     };
2055     return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
2056   }
2057 
2058   // TODO: Floating-point conversions, vectors.
2059   return false;
2060 }
2061 
2062 std::pair<unsigned, unsigned>
2063 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2064   unsigned NumDeadInst = 0;
2065   unsigned NumDeadDbgInst = 0;
  // Delete the instructions backwards, as doing so reduces the number of
  // def-use and use-def chain updates required.
2068   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2069   while (EndInst != &BB->front()) {
2070     // Delete the next to last instruction.
2071     Instruction *Inst = &*--EndInst->getIterator();
2072     if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2073       Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
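    // Keep EH pads and token-producing instructions (their uses were not
    // replaced with undef above) and continue deleting the instructions above
    // them.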
2074     if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2075       EndInst = Inst;
2076       continue;
2077     }
2078     if (isa<DbgInfoIntrinsic>(Inst))
2079       ++NumDeadDbgInst;
2080     else
2081       ++NumDeadInst;
2082     Inst->eraseFromParent();
2083   }
2084   return {NumDeadInst, NumDeadDbgInst};
2085 }
2086 
2087 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
2088                                    bool PreserveLCSSA, DomTreeUpdater *DTU,
2089                                    MemorySSAUpdater *MSSAU) {
2090   BasicBlock *BB = I->getParent();
2091 
2092   if (MSSAU)
2093     MSSAU->changeToUnreachable(I);
2094 
2095   SmallSet<BasicBlock *, 8> UniqueSuccessors;
2096 
2097   // Loop over all of the successors, removing BB's entry from any PHI
2098   // nodes.
2099   for (BasicBlock *Successor : successors(BB)) {
2100     Successor->removePredecessor(BB, PreserveLCSSA);
2101     if (DTU)
2102       UniqueSuccessors.insert(Successor);
2103   }
2104   // Insert a call to llvm.trap right before this.  This turns the undefined
2105   // behavior into a hard fail instead of falling through into random code.
2106   if (UseLLVMTrap) {
2107     Function *TrapFn =
2108       Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
2109     CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
2110     CallTrap->setDebugLoc(I->getDebugLoc());
2111   }
2112   auto *UI = new UnreachableInst(I->getContext(), I);
2113   UI->setDebugLoc(I->getDebugLoc());
2114 
2115   // All instructions after this are dead.
2116   unsigned NumInstrsRemoved = 0;
2117   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2118   while (BBI != BBE) {
2119     if (!BBI->use_empty())
2120       BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
2121     BB->getInstList().erase(BBI++);
2122     ++NumInstrsRemoved;
2123   }
2124   if (DTU) {
2125     SmallVector<DominatorTree::UpdateType, 8> Updates;
2126     Updates.reserve(UniqueSuccessors.size());
2127     for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2128       Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2129     DTU->applyUpdates(Updates);
2130   }
2131   return NumInstrsRemoved;
2132 }
2133 
2134 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2135   SmallVector<Value *, 8> Args(II->args());
2136   SmallVector<OperandBundleDef, 1> OpBundles;
2137   II->getOperandBundlesAsDefs(OpBundles);
2138   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2139                                        II->getCalledOperand(), Args, OpBundles);
2140   NewCall->setCallingConv(II->getCallingConv());
2141   NewCall->setAttributes(II->getAttributes());
2142   NewCall->setDebugLoc(II->getDebugLoc());
2143   NewCall->copyMetadata(*II);
2144 
2145   // If the invoke had profile metadata, try converting them for CallInst.
2146   uint64_t TotalWeight;
2147   if (NewCall->extractProfTotalWeight(TotalWeight)) {
2148     // Set the total weight if it fits into i32, otherwise reset.
2149     MDBuilder MDB(NewCall->getContext());
2150     auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2151                           ? nullptr
2152                           : MDB.createBranchWeights({uint32_t(TotalWeight)});
2153     NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2154   }
2155 
2156   return NewCall;
2157 }
2158 
2159 /// changeToCall - Convert the specified invoke into a normal call.
2160 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2161   CallInst *NewCall = createCallMatchingInvoke(II);
2162   NewCall->takeName(II);
2163   NewCall->insertBefore(II);
2164   II->replaceAllUsesWith(NewCall);
2165 
2166   // Follow the call by a branch to the normal destination.
2167   BasicBlock *NormalDestBB = II->getNormalDest();
2168   BranchInst::Create(NormalDestBB, II);
2169 
2170   // Update PHI nodes in the unwind destination
2171   BasicBlock *BB = II->getParent();
2172   BasicBlock *UnwindDestBB = II->getUnwindDest();
2173   UnwindDestBB->removePredecessor(BB);
2174   II->eraseFromParent();
2175   if (DTU)
2176     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2177 }
2178 
2179 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2180                                                    BasicBlock *UnwindEdge,
2181                                                    DomTreeUpdater *DTU) {
2182   BasicBlock *BB = CI->getParent();
2183 
2184   // Convert this function call into an invoke instruction.  First, split the
2185   // basic block.
2186   BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2187                                  CI->getName() + ".noexc");
2188 
2189   // Delete the unconditional branch inserted by SplitBlock
2190   BB->getInstList().pop_back();
2191 
2192   // Create the new invoke instruction.
2193   SmallVector<Value *, 8> InvokeArgs(CI->args());
2194   SmallVector<OperandBundleDef, 1> OpBundles;
2195 
2196   CI->getOperandBundlesAsDefs(OpBundles);
2197 
2198   // Note: we're round tripping operand bundles through memory here, and that
2199   // can potentially be avoided with a cleverer API design that we do not have
2200   // as of this time.
2201 
2202   InvokeInst *II =
2203       InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2204                          UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2205   II->setDebugLoc(CI->getDebugLoc());
2206   II->setCallingConv(CI->getCallingConv());
2207   II->setAttributes(CI->getAttributes());
2208 
2209   if (DTU)
2210     DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2211 
2212   // Make sure that anything using the call now uses the invoke!  This also
2213   // updates the CallGraph if present, because it uses a WeakTrackingVH.
2214   CI->replaceAllUsesWith(II);
2215 
2216   // Delete the original call
2217   Split->getInstList().pop_front();
2218   return Split;
2219 }
2220 
2221 static bool markAliveBlocks(Function &F,
2222                             SmallPtrSetImpl<BasicBlock *> &Reachable,
2223                             DomTreeUpdater *DTU = nullptr) {
2224   SmallVector<BasicBlock*, 128> Worklist;
2225   BasicBlock *BB = &F.front();
2226   Worklist.push_back(BB);
2227   Reachable.insert(BB);
2228   bool Changed = false;
2229   do {
2230     BB = Worklist.pop_back_val();
2231 
2232     // Do a quick scan of the basic block, turning any obviously unreachable
2233     // instructions into LLVM unreachable insts.  The instruction combining pass
2234     // canonicalizes unreachable insts into stores to null or undef.
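    // For example, a `store i32 1, i32* null` in a function where null is not
    // a defined address is replaced below by a trap call and an `unreachable`.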
2235     for (Instruction &I : *BB) {
2236       if (auto *CI = dyn_cast<CallInst>(&I)) {
2237         Value *Callee = CI->getCalledOperand();
2238         // Handle intrinsic calls.
2239         if (Function *F = dyn_cast<Function>(Callee)) {
2240           auto IntrinsicID = F->getIntrinsicID();
2241           // Assumptions that are known to be false are equivalent to
2242           // unreachable. Also, if the condition is undefined, then we make the
2243           // choice most beneficial to the optimizer, and choose that to also be
2244           // unreachable.
2245           if (IntrinsicID == Intrinsic::assume) {
2246             if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2247               // Don't insert a call to llvm.trap right before the unreachable.
2248               changeToUnreachable(CI, false, false, DTU);
2249               Changed = true;
2250               break;
2251             }
2252           } else if (IntrinsicID == Intrinsic::experimental_guard) {
2253             // A call to the guard intrinsic bails out of the current
2254             // compilation unit if the predicate passed to it is false. If the
2255             // predicate is a constant false, then we know the guard will bail
2256             // out of the current compile unconditionally, so all code following
2257             // it is dead.
2258             //
2259             // Note: unlike in llvm.assume, it is not "obviously profitable" for
2260             // guards to treat `undef` as `false` since a guard on `undef` can
2261             // still be useful for widening.
2262             if (match(CI->getArgOperand(0), m_Zero()))
2263               if (!isa<UnreachableInst>(CI->getNextNode())) {
2264                 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false,
2265                                     false, DTU);
2266                 Changed = true;
2267                 break;
2268               }
2269           }
2270         } else if ((isa<ConstantPointerNull>(Callee) &&
2271                     !NullPointerIsDefined(CI->getFunction())) ||
2272                    isa<UndefValue>(Callee)) {
2273           changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU);
2274           Changed = true;
2275           break;
2276         }
2277         if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2278           // If we found a call to a no-return function, insert an unreachable
2279           // instruction after it.  Make sure there isn't *already* one there
2280           // though.
2281           if (!isa<UnreachableInst>(CI->getNextNode())) {
2282             // Don't insert a call to llvm.trap right before the unreachable.
2283             changeToUnreachable(CI->getNextNode(), false, false, DTU);
2284             Changed = true;
2285           }
2286           break;
2287         }
2288       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2289         // Store to undef and store to null are undefined and used to signal
2290         // that they should be changed to unreachable by passes that can't
2291         // modify the CFG.
2292 
2293         // Don't touch volatile stores.
2294         if (SI->isVolatile()) continue;
2295 
2296         Value *Ptr = SI->getOperand(1);
2297 
2298         if (isa<UndefValue>(Ptr) ||
2299             (isa<ConstantPointerNull>(Ptr) &&
2300              !NullPointerIsDefined(SI->getFunction(),
2301                                    SI->getPointerAddressSpace()))) {
2302           changeToUnreachable(SI, true, false, DTU);
2303           Changed = true;
2304           break;
2305         }
2306       }
2307     }
2308 
2309     Instruction *Terminator = BB->getTerminator();
2310     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2311       // Turn invokes that call 'nounwind' functions into ordinary calls.
2312       Value *Callee = II->getCalledOperand();
2313       if ((isa<ConstantPointerNull>(Callee) &&
2314            !NullPointerIsDefined(BB->getParent())) ||
2315           isa<UndefValue>(Callee)) {
2316         changeToUnreachable(II, true, false, DTU);
2317         Changed = true;
2318       } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2319         if (II->use_empty() && II->onlyReadsMemory()) {
          // The invoke's result is unused, so just branch to the normal
          // destination.
2321           BasicBlock *NormalDestBB = II->getNormalDest();
2322           BasicBlock *UnwindDestBB = II->getUnwindDest();
2323           BranchInst::Create(NormalDestBB, II);
2324           UnwindDestBB->removePredecessor(II->getParent());
2325           II->eraseFromParent();
2326           if (DTU)
2327             DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2328         } else
2329           changeToCall(II, DTU);
2330         Changed = true;
2331       }
2332     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2333       // Remove catchpads which cannot be reached.
2334       struct CatchPadDenseMapInfo {
2335         static CatchPadInst *getEmptyKey() {
2336           return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2337         }
2338 
2339         static CatchPadInst *getTombstoneKey() {
2340           return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2341         }
2342 
2343         static unsigned getHashValue(CatchPadInst *CatchPad) {
2344           return static_cast<unsigned>(hash_combine_range(
2345               CatchPad->value_op_begin(), CatchPad->value_op_end()));
2346         }
2347 
2348         static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2349           if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2350               RHS == getEmptyKey() || RHS == getTombstoneKey())
2351             return LHS == RHS;
2352           return LHS->isIdenticalTo(RHS);
2353         }
2354       };
2355 
2356       SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2357       // Set of unique CatchPads.
2358       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2359                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2360           HandlerSet;
2361       detail::DenseSetEmpty Empty;
2362       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2363                                              E = CatchSwitch->handler_end();
2364            I != E; ++I) {
2365         BasicBlock *HandlerBB = *I;
2366         if (DTU)
2367           ++NumPerSuccessorCases[HandlerBB];
2368         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2369         if (!HandlerSet.insert({CatchPad, Empty}).second) {
2370           if (DTU)
2371             --NumPerSuccessorCases[HandlerBB];
2372           CatchSwitch->removeHandler(I);
2373           --I;
2374           --E;
2375           Changed = true;
2376         }
2377       }
2378       if (DTU) {
2379         std::vector<DominatorTree::UpdateType> Updates;
2380         for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2381           if (I.second == 0)
2382             Updates.push_back({DominatorTree::Delete, BB, I.first});
2383         DTU->applyUpdates(Updates);
2384       }
2385     }
2386 
2387     Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2388     for (BasicBlock *Successor : successors(BB))
2389       if (Reachable.insert(Successor).second)
2390         Worklist.push_back(Successor);
2391   } while (!Worklist.empty());
2392   return Changed;
2393 }
2394 
2395 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2396   Instruction *TI = BB->getTerminator();
2397 
2398   if (auto *II = dyn_cast<InvokeInst>(TI)) {
2399     changeToCall(II, DTU);
2400     return;
2401   }
2402 
2403   Instruction *NewTI;
2404   BasicBlock *UnwindDest;
2405 
2406   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2407     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2408     UnwindDest = CRI->getUnwindDest();
2409   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2410     auto *NewCatchSwitch = CatchSwitchInst::Create(
2411         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2412         CatchSwitch->getName(), CatchSwitch);
2413     for (BasicBlock *PadBB : CatchSwitch->handlers())
2414       NewCatchSwitch->addHandler(PadBB);
2415 
2416     NewTI = NewCatchSwitch;
2417     UnwindDest = CatchSwitch->getUnwindDest();
2418   } else {
2419     llvm_unreachable("Could not find unwind successor");
2420   }
2421 
2422   NewTI->takeName(TI);
2423   NewTI->setDebugLoc(TI->getDebugLoc());
2424   UnwindDest->removePredecessor(BB);
2425   TI->replaceAllUsesWith(NewTI);
2426   TI->eraseFromParent();
2427   if (DTU)
2428     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2429 }
2430 
2431 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
2432 /// if they are in a dead cycle.  Return true if a change was made, false
2433 /// otherwise.
2434 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2435                                    MemorySSAUpdater *MSSAU) {
2436   SmallPtrSet<BasicBlock *, 16> Reachable;
2437   bool Changed = markAliveBlocks(F, Reachable, DTU);
2438 
2439   // If there are unreachable blocks in the CFG...
2440   if (Reachable.size() == F.size())
2441     return Changed;
2442 
2443   assert(Reachable.size() < F.size());
2444 
2445   // Are there any blocks left to actually delete?
2446   SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2447   for (BasicBlock &BB : F) {
2448     // Skip reachable basic blocks
2449     if (Reachable.count(&BB))
2450       continue;
2451     // Skip already-deleted blocks
2452     if (DTU && DTU->isBBPendingDeletion(&BB))
2453       continue;
2454     BlocksToRemove.insert(&BB);
2455   }
2456 
2457   if (BlocksToRemove.empty())
2458     return Changed;
2459 
2460   Changed = true;
2461   NumRemoved += BlocksToRemove.size();
2462 
2463   if (MSSAU)
2464     MSSAU->removeBlocks(BlocksToRemove);
2465 
2466   // Loop over all of the basic blocks that are up for removal, dropping all of
2467   // their internal references. Update DTU if available.
2468   std::vector<DominatorTree::UpdateType> Updates;
2469   for (auto *BB : BlocksToRemove) {
2470     SmallSet<BasicBlock *, 8> UniqueSuccessors;
2471     for (BasicBlock *Successor : successors(BB)) {
2472       // Only remove references to BB in reachable successors of BB.
2473       if (Reachable.count(Successor))
2474         Successor->removePredecessor(BB);
2475       if (DTU)
2476         UniqueSuccessors.insert(Successor);
2477     }
2478     BB->dropAllReferences();
2479     if (DTU) {
2480       Instruction *TI = BB->getTerminator();
2481       assert(TI && "Basic block should have a terminator");
      // Terminators like invoke can have users. We have to replace their uses
      // before removing them.
2484       if (!TI->use_empty())
2485         TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
2486       TI->eraseFromParent();
2487       new UnreachableInst(BB->getContext(), BB);
2488       assert(succ_empty(BB) && "The successor list of BB isn't empty before "
2489                                "applying corresponding DTU updates.");
2490       Updates.reserve(Updates.size() + UniqueSuccessors.size());
2491       for (auto *UniqueSuccessor : UniqueSuccessors)
2492         Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2493     }
2494   }
2495 
2496   if (DTU) {
2497     DTU->applyUpdates(Updates);
2498     for (auto *BB : BlocksToRemove)
2499       DTU->deleteBB(BB);
2500   } else {
2501     for (auto *BB : BlocksToRemove)
2502       BB->eraseFromParent();
2503   }
2504 
2505   return Changed;
2506 }
2507 
2508 void llvm::combineMetadata(Instruction *K, const Instruction *J,
2509                            ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
2510   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2511   K->dropUnknownNonDebugMetadata(KnownIDs);
2512   K->getAllMetadataOtherThanDebugLoc(Metadata);
2513   for (const auto &MD : Metadata) {
2514     unsigned Kind = MD.first;
2515     MDNode *JMD = J->getMetadata(Kind);
2516     MDNode *KMD = MD.second;
2517 
2518     switch (Kind) {
2519       default:
2520         K->setMetadata(Kind, nullptr); // Remove unknown metadata
2521         break;
2522       case LLVMContext::MD_dbg:
2523         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2524       case LLVMContext::MD_tbaa:
2525         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2526         break;
2527       case LLVMContext::MD_alias_scope:
2528         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2529         break;
2530       case LLVMContext::MD_noalias:
2531       case LLVMContext::MD_mem_parallel_loop_access:
2532         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2533         break;
2534       case LLVMContext::MD_access_group:
2535         K->setMetadata(LLVMContext::MD_access_group,
2536                        intersectAccessGroups(K, J));
2537         break;
2538       case LLVMContext::MD_range:
2539 
2540         // If K does move, use most generic range. Otherwise keep the range of
2541         // K.
2542         if (DoesKMove)
2543           // FIXME: If K does move, we should drop the range info and nonnull.
2544           //        Currently this function is used with DoesKMove in passes
2545           //        doing hoisting/sinking and the current behavior of using the
2546           //        most generic range is correct in those cases.
2547           K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2548         break;
2549       case LLVMContext::MD_fpmath:
2550         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2551         break;
2552       case LLVMContext::MD_invariant_load:
2553         // Only set the !invariant.load if it is present in both instructions.
2554         K->setMetadata(Kind, JMD);
2555         break;
2556       case LLVMContext::MD_nonnull:
        // If K does move, keep nonnull if it is present in both instructions.
2558         if (DoesKMove)
2559           K->setMetadata(Kind, JMD);
2560         break;
2561       case LLVMContext::MD_invariant_group:
2562         // Preserve !invariant.group in K.
2563         break;
2564       case LLVMContext::MD_align:
2565         K->setMetadata(Kind,
2566           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2567         break;
2568       case LLVMContext::MD_dereferenceable:
2569       case LLVMContext::MD_dereferenceable_or_null:
2570         K->setMetadata(Kind,
2571           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2572         break;
2573       case LLVMContext::MD_preserve_access_index:
2574         // Preserve !preserve.access.index in K.
2575         break;
2576     }
2577   }
2578   // Set !invariant.group from J if J has it. If both instructions have it
2579   // then we will just pick it from J - even when they are different.
  // Also make sure that K is a load or store - e.g. combining a bitcast with a
  // load could produce a bitcast with invariant.group metadata, which is
  // invalid.
2582   // FIXME: we should try to preserve both invariant.group md if they are
2583   // different, but right now instruction can only have one invariant.group.
2584   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2585     if (isa<LoadInst>(K) || isa<StoreInst>(K))
2586       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2587 }
2588 
2589 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2590                                  bool KDominatesJ) {
2591   unsigned KnownIDs[] = {
2592       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2593       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2594       LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
2595       LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2596       LLVMContext::MD_dereferenceable,
2597       LLVMContext::MD_dereferenceable_or_null,
2598       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2599   combineMetadata(K, J, KnownIDs, KDominatesJ);
2600 }
2601 
2602 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2603   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2604   Source.getAllMetadata(MD);
2605   MDBuilder MDB(Dest.getContext());
2606   Type *NewType = Dest.getType();
2607   const DataLayout &DL = Source.getModule()->getDataLayout();
2608   for (const auto &MDPair : MD) {
2609     unsigned ID = MDPair.first;
2610     MDNode *N = MDPair.second;
2611     // Note, essentially every kind of metadata should be preserved here! This
2612     // routine is supposed to clone a load instruction changing *only its type*.
2613     // The only metadata it makes sense to drop is metadata which is invalidated
2614     // when the pointer type changes. This should essentially never be the case
2615     // in LLVM, but we explicitly switch over only known metadata to be
2616     // conservatively correct. If you are adding metadata to LLVM which pertains
2617     // to loads, you almost certainly want to add it here.
2618     switch (ID) {
2619     case LLVMContext::MD_dbg:
2620     case LLVMContext::MD_tbaa:
2621     case LLVMContext::MD_prof:
2622     case LLVMContext::MD_fpmath:
2623     case LLVMContext::MD_tbaa_struct:
2624     case LLVMContext::MD_invariant_load:
2625     case LLVMContext::MD_alias_scope:
2626     case LLVMContext::MD_noalias:
2627     case LLVMContext::MD_nontemporal:
2628     case LLVMContext::MD_mem_parallel_loop_access:
2629     case LLVMContext::MD_access_group:
2630       // All of these directly apply.
2631       Dest.setMetadata(ID, N);
2632       break;
2633 
2634     case LLVMContext::MD_nonnull:
2635       copyNonnullMetadata(Source, N, Dest);
2636       break;
2637 
2638     case LLVMContext::MD_align:
2639     case LLVMContext::MD_dereferenceable:
2640     case LLVMContext::MD_dereferenceable_or_null:
2641       // These only directly apply if the new type is also a pointer.
2642       if (NewType->isPointerTy())
2643         Dest.setMetadata(ID, N);
2644       break;
2645 
2646     case LLVMContext::MD_range:
2647       copyRangeMetadata(DL, Source, N, Dest);
2648       break;
2649     }
2650   }
2651 }
2652 
2653 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2654   auto *ReplInst = dyn_cast<Instruction>(Repl);
2655   if (!ReplInst)
2656     return;
2657 
2658   // Patch the replacement so that it is not more restrictive than the value
2659   // being replaced.
2660   // Note that if 'I' is a load being replaced by some operation,
2661   // for example, by an arithmetic operation, then andIRFlags()
2662   // would just erase all math flags from the original arithmetic
2663   // operation, which is clearly not wanted and not needed.
2664   if (!isa<LoadInst>(I))
2665     ReplInst->andIRFlags(I);
2666 
2667   // FIXME: If both the original and replacement value are part of the
2668   // same control-flow region (meaning that the execution of one
2669   // guarantees the execution of the other), then we can combine the
2670   // noalias scopes here and do better than the general conservative
2671   // answer used in combineMetadata().
2672 
2673   // In general, GVN unifies expressions over different control-flow
2674   // regions, and so we need a conservative combination of the noalias
2675   // scopes.
2676   static const unsigned KnownIDs[] = {
2677       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2678       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2679       LLVMContext::MD_fpmath,          LLVMContext::MD_invariant_load,
2680       LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
2681       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2682   combineMetadata(ReplInst, I, KnownIDs, false);
2683 }
2684 
2685 template <typename RootType, typename DominatesFn>
2686 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2687                                          const RootType &Root,
2688                                          const DominatesFn &Dominates) {
2689   assert(From->getType() == To->getType());
2690 
2691   unsigned Count = 0;
2692   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2693        UI != UE;) {
2694     Use &U = *UI++;
2695     if (!Dominates(Root, U))
2696       continue;
2697     U.set(To);
2698     LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
2699                       << "' as " << *To << " in " << *U << "\n");
2700     ++Count;
2701   }
2702   return Count;
2703 }
2704 
2705 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
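  // Replace every use of 'From' that does not live in 'From's own basic block.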
  assert(From->getType() == To->getType());
  auto *BB = From->getParent();
  unsigned Count = 0;
2709 
2710   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2711        UI != UE;) {
2712     Use &U = *UI++;
2713     auto *I = cast<Instruction>(U.getUser());
2714     if (I->getParent() == BB)
2715       continue;
2716     U.set(To);
2717     ++Count;
2718   }
2719   return Count;
2720 }
2721 
2722 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2723                                         DominatorTree &DT,
2724                                         const BasicBlockEdge &Root) {
2725   auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2726     return DT.dominates(Root, U);
2727   };
2728   return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2729 }
2730 
2731 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2732                                         DominatorTree &DT,
2733                                         const BasicBlock *BB) {
2734   auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
2735     return DT.dominates(BB, U);
2736   };
2737   return ::replaceDominatedUsesWith(From, To, BB, Dominates);
2738 }
2739 
2740 bool llvm::callsGCLeafFunction(const CallBase *Call,
2741                                const TargetLibraryInfo &TLI) {
2742   // Check if the function is specifically marked as a gc leaf function.
2743   if (Call->hasFnAttr("gc-leaf-function"))
2744     return true;
2745   if (const Function *F = Call->getCalledFunction()) {
2746     if (F->hasFnAttribute("gc-leaf-function"))
2747       return true;
2748 
2749     if (auto IID = F->getIntrinsicID()) {
2750       // Most LLVM intrinsics do not take safepoints.
2751       return IID != Intrinsic::experimental_gc_statepoint &&
2752              IID != Intrinsic::experimental_deoptimize &&
2753              IID != Intrinsic::memcpy_element_unordered_atomic &&
2754              IID != Intrinsic::memmove_element_unordered_atomic;
2755     }
2756   }
2757 
2758   // Lib calls can be materialized by some passes, and won't be
2759   // marked as 'gc-leaf-function.' All available Libcalls are
2760   // GC-leaf.
2761   LibFunc LF;
2762   if (TLI.getLibFunc(*Call, LF)) {
2763     return TLI.has(LF);
2764   }
2765 
2766   return false;
2767 }
2768 
2769 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2770                                LoadInst &NewLI) {
2771   auto *NewTy = NewLI.getType();
2772 
2773   // This only directly applies if the new type is also a pointer.
2774   if (NewTy->isPointerTy()) {
2775     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2776     return;
2777   }
2778 
2779   // The only other translation we can do is to integral loads with !range
2780   // metadata.
2781   if (!NewTy->isIntegerTy())
2782     return;
2783 
2784   MDBuilder MDB(NewLI.getContext());
2785   const Value *Ptr = OldLI.getPointerOperand();
2786   auto *ITy = cast<IntegerType>(NewTy);
2787   auto *NullInt = ConstantExpr::getPtrToInt(
2788       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2789   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
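  // The resulting range [null+1, null) wraps around and therefore covers
  // every integer value except the one the null pointer maps to.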
2790   NewLI.setMetadata(LLVMContext::MD_range,
2791                     MDB.createRange(NonNullInt, NullInt));
2792 }
2793 
2794 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2795                              MDNode *N, LoadInst &NewLI) {
2796   auto *NewTy = NewLI.getType();
2797 
  // Give up unless the load is being converted to a pointer type, where there
  // is a single, very valuable mapping we can do reliably.
  // FIXME: It would be nice to propagate this in more ways, but the type
  // conversions make it hard.
2802   if (!NewTy->isPointerTy())
2803     return;
2804 
2805   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
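  // If the old integer load's range excludes zero, then the pointer loaded
  // from the same location can never be null; transfer that fact as !nonnull.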
2806   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2807     MDNode *NN = MDNode::get(OldLI.getContext(), None);
2808     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2809   }
2810 }
2811 
2812 void llvm::dropDebugUsers(Instruction &I) {
2813   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2814   findDbgUsers(DbgUsers, &I);
2815   for (auto *DII : DbgUsers)
2816     DII->eraseFromParent();
2817 }
2818 
2819 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2820                                     BasicBlock *BB) {
  // Since we are moving the instructions out of their basic block, we do not
  // retain their original debug locations (DILocations) and debug intrinsic
  // instructions.
2824   //
2825   // Doing so would degrade the debugging experience and adversely affect the
2826   // accuracy of profiling information.
2827   //
2828   // Currently, when hoisting the instructions, we take the following actions:
2829   // - Remove their debug intrinsic instructions.
2830   // - Set their debug locations to the values from the insertion point.
2831   //
2832   // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2833   // need to be deleted, is because there will not be any instructions with a
2834   // DILocation in either branch left after performing the transformation. We
2835   // can only insert a dbg.value after the two branches are joined again.
2836   //
2837   // See PR38762, PR39243 for more details.
2838   //
2839   // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2840   // encode predicated DIExpressions that yield different results on different
2841   // code paths.
2842 
2843   // A hoisted conditional probe should be treated as dangling so that it will
2844   // not be over-counted when the samples collected on the non-conditional path
2845   // are counted towards the conditional path. We leave it for the counts
  // inference algorithm to figure out a proper count for a dangling probe.
2847   moveAndDanglePseudoProbes(BB, InsertPt);
2848 
2849   for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2850     Instruction *I = &*II;
2851     I->dropUnknownNonDebugMetadata();
2852     if (I->isUsedByMetadata())
2853       dropDebugUsers(*I);
2854     if (isa<DbgInfoIntrinsic>(I)) {
2855       // Remove DbgInfo Intrinsics.
2856       II = I->eraseFromParent();
2857       continue;
2858     }
2859     I->setDebugLoc(InsertPt->getDebugLoc());
2860     ++II;
2861   }
2862   DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2863                                  BB->begin(),
2864                                  BB->getTerminator()->getIterator());
2865 }
2866 
2867 namespace {
2868 
2869 /// A potential constituent of a bitreverse or bswap expression. See
2870 /// collectBitParts for a fuller explanation.
2871 struct BitPart {
2872   BitPart(Value *P, unsigned BW) : Provider(P) {
2873     Provenance.resize(BW);
2874   }
2875 
2876   /// The Value that this is a bitreverse/bswap of.
2877   Value *Provider;
2878 
2879   /// The "provenance" of each bit. Provenance[A] = B means that bit A
2880   /// in Provider becomes bit B in the result of this expression.
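  /// For example, Provenance[31] == 7 records that bit 31 of the result is
  /// produced by bit 7 of Provider.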
2881   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2882 
2883   enum { Unset = -1 };
2884 };
2885 
2886 } // end anonymous namespace
2887 
2888 /// Analyze the specified subexpression and see if it is capable of providing
2889 /// pieces of a bswap or bitreverse. The subexpression provides a potential
2890 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
2891 /// the output of the expression came from a corresponding bit in some other
2892 /// value. This function is recursive, and the end result is a mapping of
2893 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
2894 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2895 ///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
2897 /// that the expression deposits the low byte of %X into the high byte of the
2898 /// result and that all other bits are zero. This expression is accepted and a
2899 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2900 /// [0-7].
2901 ///
2902 /// For vector types, all analysis is performed at the per-element level. No
2903 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
2904 /// constant masks must be splatted across all elements.
2905 ///
2906 /// To avoid revisiting values, the BitPart results are memoized into the
2907 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this, \c BPS needs to
/// store BitPart objects, not pointers. As we need the concept of a null
/// BitPart (the Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
2912 ///
2913 /// Because we pass around references into \c BPS, we must use a container that
2914 /// does not invalidate internal references (std::map instead of DenseMap).
2915 static const Optional<BitPart> &
2916 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2917                 std::map<Value *, Optional<BitPart>> &BPS, int Depth) {
2918   auto I = BPS.find(V);
2919   if (I != BPS.end())
2920     return I->second;
2921 
2922   auto &Result = BPS[V] = None;
2923   auto BitWidth = V->getType()->getScalarSizeInBits();
2924 
2925   // Can't do integer/elements > 128 bits.
2926   if (BitWidth > 128)
2927     return Result;
2928 
  // Prevent stack overflow by limiting the recursion depth.
2930   if (Depth == BitPartRecursionMaxDepth) {
2931     LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
2932     return Result;
2933   }
2934 
2935   if (auto *I = dyn_cast<Instruction>(V)) {
2936     Value *X, *Y;
2937     const APInt *C;
2938 
2939     // If this is an or instruction, it may be an inner node of the bswap.
2940     if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
2941       const auto &A =
2942           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2943       const auto &B =
2944           collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2945       if (!A || !B)
2946         return Result;
2947 
      // Try to merge the two together.
2949       if (!A->Provider || A->Provider != B->Provider)
2950         return Result;
2951 
2952       Result = BitPart(A->Provider, BitWidth);
2953       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
2954         if (A->Provenance[BitIdx] != BitPart::Unset &&
2955             B->Provenance[BitIdx] != BitPart::Unset &&
2956             A->Provenance[BitIdx] != B->Provenance[BitIdx])
2957           return Result = None;
2958 
2959         if (A->Provenance[BitIdx] == BitPart::Unset)
2960           Result->Provenance[BitIdx] = B->Provenance[BitIdx];
2961         else
2962           Result->Provenance[BitIdx] = A->Provenance[BitIdx];
2963       }
2964 
2965       return Result;
2966     }
2967 
2968     // If this is a logical shift by a constant, recurse then shift the result.
2969     if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
2970       const APInt &BitShift = *C;
2971 
2972       // Ensure the shift amount is defined.
2973       if (BitShift.uge(BitWidth))
2974         return Result;
2975 
2976       const auto &Res =
2977           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2978       if (!Res)
2979         return Result;
2980       Result = Res;
2981 
2982       // Perform the "shift" on BitProvenance.
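      // For example, if the operand's provenance is the identity
      // [0, 1, ..., 31], an i32 shl by 8 turns it into
      // [Unset x 8, 0, 1, ..., 23]: result bits [8,31] now come from the
      // provider's bits [0,23] and the low byte is known to be zero.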
2983       auto &P = Result->Provenance;
2984       if (I->getOpcode() == Instruction::Shl) {
2985         P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
2986         P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
2987       } else {
2988         P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
2989         P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
2990       }
2991 
2992       return Result;
2993     }
2994 
2995     // If this is a logical 'and' with a mask that clears bits, recurse then
2996     // unset the appropriate bits.
2997     if (match(V, m_And(m_Value(X), m_APInt(C)))) {
2998       const APInt &AndMask = *C;
2999 
      // As an early exit when only matching bswaps, check that the mask keeps
      // a whole number of bytes' worth of bits set.
3002       unsigned NumMaskedBits = AndMask.countPopulation();
3003       if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3004         return Result;
3005 
3006       const auto &Res =
3007           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3008       if (!Res)
3009         return Result;
3010       Result = Res;
3011 
3012       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3013         // If the AndMask is zero for this bit, clear the bit.
3014         if (AndMask[BitIdx] == 0)
3015           Result->Provenance[BitIdx] = BitPart::Unset;
3016       return Result;
3017     }
3018 
    // If this is a zext instruction, zero extend the result.
3020     if (match(V, m_ZExt(m_Value(X)))) {
3021       const auto &Res =
3022           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3023       if (!Res)
3024         return Result;
3025 
3026       Result = BitPart(Res->Provider, BitWidth);
3027       auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3028       for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3029         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3030       for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3031         Result->Provenance[BitIdx] = BitPart::Unset;
3032       return Result;
3033     }
3034 
3035     // If this is a truncate instruction, extract the lower bits.
3036     if (match(V, m_Trunc(m_Value(X)))) {
3037       const auto &Res =
3038           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3039       if (!Res)
3040         return Result;
3041 
3042       Result = BitPart(Res->Provider, BitWidth);
3043       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3044         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3045       return Result;
3046     }
3047 
    // BITREVERSE - most likely due to us previously matching a partial
    // bitreverse.
3050     if (match(V, m_BitReverse(m_Value(X)))) {
3051       const auto &Res =
3052           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3053       if (!Res)
3054         return Result;
3055 
3056       Result = BitPart(Res->Provider, BitWidth);
3057       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3058         Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3059       return Result;
3060     }
3061 
    // BSWAP - most likely due to us previously matching a partial bswap.
3063     if (match(V, m_BSwap(m_Value(X)))) {
3064       const auto &Res =
3065           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3066       if (!Res)
3067         return Result;
3068 
3069       unsigned ByteWidth = BitWidth / 8;
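      // Reverse the provenance a byte at a time: the operand's low byte
      // provides the result's high byte, and so on.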
3070       Result = BitPart(Res->Provider, BitWidth);
3071       for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3072         unsigned ByteBitOfs = ByteIdx * 8;
3073         for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3074           Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3075               Res->Provenance[ByteBitOfs + BitIdx];
3076       }
3077       return Result;
3078     }
3079 
3080     // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3081     // amount (modulo).
3082     // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3083     // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
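    // For example, fshl(X, Y, 8) on i32 takes X's bits [0,23] into the
    // result's bits [8,31] and Y's bits [24,31] into the result's bits [0,7],
    // which is how the provenance is stitched together below.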
3084     if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3085         match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3086       // We can treat fshr as a fshl by flipping the modulo amount.
3087       unsigned ModAmt = C->urem(BitWidth);
3088       if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3089         ModAmt = BitWidth - ModAmt;
3090 
3091       const auto &LHS =
3092           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3093       const auto &RHS =
3094           collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3095 
3096       // Check we have both sources and they are from the same provider.
3097       if (!LHS || !RHS || !LHS->Provider || LHS->Provider != RHS->Provider)
3098         return Result;
3099 
3100       unsigned StartBitRHS = BitWidth - ModAmt;
3101       Result = BitPart(LHS->Provider, BitWidth);
3102       for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3103         Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3104       for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3105         Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3106       return Result;
3107     }
3108   }
3109 
3110   // Okay, we got to something that isn't a shift, 'or' or 'and'.  This must be
3111   // the input value to the bswap/bitreverse.
3112   Result = BitPart(V, BitWidth);
3113   for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3114     Result->Provenance[BitIdx] = BitIdx;
3115   return Result;
3116 }
3117 
3118 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3119                                           unsigned BitWidth) {
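  // For a byte swap, the bit's offset within its byte must be preserved while
  // its byte moves to the mirrored position, e.g. for i32, provider bit 3
  // (byte 0, offset 3) must become result bit 27 (byte 3, offset 3).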
3120   if (From % 8 != To % 8)
3121     return false;
3122   // Convert from bit indices to byte indices and check for a byte reversal.
3123   From >>= 3;
3124   To >>= 3;
3125   BitWidth >>= 3;
3126   return From == BitWidth - To - 1;
3127 }
3128 
3129 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3130                                                unsigned BitWidth) {
3131   return From == BitWidth - To - 1;
3132 }
3133 
3134 bool llvm::recognizeBSwapOrBitReverseIdiom(
3135     Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3136     SmallVectorImpl<Instruction *> &InsertedInsts) {
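  // For example (with bswap matching enabled), the i16 idiom
  //   %hi = shl i16 %x, 8
  //   %lo = lshr i16 %x, 8
  //   %r  = or i16 %hi, %lo
  // is recognized here: a call to llvm.bswap.i16(%x) is materialized next to
  // %r, and the caller is expected to replace %r with it.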
3137   if (Operator::getOpcode(I) != Instruction::Or)
3138     return false;
3139   if (!MatchBSwaps && !MatchBitReversals)
3140     return false;
3141   Type *ITy = I->getType();
3142   if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3143     return false;  // Can't do integer/elements > 128 bits.
3144 
3145   Type *DemandedTy = ITy;
3146   if (I->hasOneUse())
3147     if (auto *Trunc = dyn_cast<TruncInst>(I->user_back()))
3148       DemandedTy = Trunc->getType();
3149 
3150   // Try to find all the pieces corresponding to the bswap.
3151   std::map<Value *, Optional<BitPart>> BPS;
3152   auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0);
3153   if (!Res)
3154     return false;
3155   ArrayRef<int8_t> BitProvenance = Res->Provenance;
3156   assert(all_of(BitProvenance,
3157                 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3158          "Illegal bit provenance index");
3159 
  // If the upper bits are zero, then attempt to perform the op on a
  // truncated type.
3161   if (BitProvenance.back() == BitPart::Unset) {
3162     while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3163       BitProvenance = BitProvenance.drop_back();
3164     if (BitProvenance.empty())
3165       return false; // TODO - handle null value?
3166     DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3167     if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3168       DemandedTy = VectorType::get(DemandedTy, IVecTy);
3169   }
3170 
3171   // Check BitProvenance hasn't found a source larger than the result type.
3172   unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3173   if (DemandedBW > ITy->getScalarSizeInBits())
3174     return false;
3175 
3176   // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3177   // only byteswap values with an even number of bytes.
3178   APInt DemandedMask = APInt::getAllOnesValue(DemandedBW);
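  // Bits whose provenance is unknown (Unset) are removed from the demanded
  // mask; they are zeroed with an explicit 'and' after the intrinsic call is
  // created.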
3179   bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3180   bool OKForBitReverse = MatchBitReversals;
3181   for (unsigned BitIdx = 0;
3182        (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3183     if (BitProvenance[BitIdx] == BitPart::Unset) {
3184       DemandedMask.clearBit(BitIdx);
3185       continue;
3186     }
3187     OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3188                                                 DemandedBW);
3189     OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3190                                                           BitIdx, DemandedBW);
3191   }
3192 
3193   Intrinsic::ID Intrin;
3194   if (OKForBSwap)
3195     Intrin = Intrinsic::bswap;
3196   else if (OKForBitReverse)
3197     Intrin = Intrinsic::bitreverse;
3198   else
3199     return false;
3200 
3201   Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
3202   Value *Provider = Res->Provider;
3203 
3204   // We may need to truncate the provider.
3205   if (DemandedTy != Provider->getType()) {
3206     auto *Trunc =
3207         CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
3208     InsertedInsts.push_back(Trunc);
3209     Provider = Trunc;
3210   }
3211 
3212   Instruction *Result = CallInst::Create(F, Provider, "rev", I);
3213   InsertedInsts.push_back(Result);
3214 
3215   if (!DemandedMask.isAllOnesValue()) {
3216     auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3217     Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
3218     InsertedInsts.push_back(Result);
3219   }
3220 
3221   // We may need to zeroextend back to the result type.
3222   if (ITy != Result->getType()) {
3223     auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
3224     InsertedInsts.push_back(ExtInst);
3225   }
3226 
3227   return true;
3228 }
3229 
3230 // CodeGen has special handling for some string functions that may replace
3231 // them with target-specific intrinsics.  Since that'd skip our interceptors
3232 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3233 // we mark affected calls as NoBuiltin, which will disable optimization
3234 // in CodeGen.
3235 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3236     CallInst *CI, const TargetLibraryInfo *TLI) {
3237   Function *F = CI->getCalledFunction();
3238   LibFunc Func;
3239   if (F && !F->hasLocalLinkage() && F->hasName() &&
3240       TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3241       !F->doesNotAccessMemory())
3242     CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
3243 }
3244 
3245 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
3246   // We can't have a PHI with a metadata type.
3247   if (I->getOperand(OpIdx)->getType()->isMetadataTy())
3248     return false;
3249 
3250   // Early exit.
3251   if (!isa<Constant>(I->getOperand(OpIdx)))
3252     return true;
3253 
3254   switch (I->getOpcode()) {
3255   default:
3256     return true;
3257   case Instruction::Call:
3258   case Instruction::Invoke: {
3259     const auto &CB = cast<CallBase>(*I);
3260 
3261     // Can't handle inline asm. Skip it.
3262     if (CB.isInlineAsm())
3263       return false;
3264 
3265     // Constant bundle operands may need to retain their constant-ness for
3266     // correctness.
3267     if (CB.isBundleOperand(OpIdx))
3268       return false;
3269 
3270     if (OpIdx < CB.getNumArgOperands()) {
3271       // Some variadic intrinsics require constants in the variadic arguments,
3272       // which currently aren't markable as immarg.
3273       if (isa<IntrinsicInst>(CB) &&
3274           OpIdx >= CB.getFunctionType()->getNumParams()) {
3275         // This is known to be OK for stackmap.
3276         return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3277       }
3278 
3279       // gcroot is a special case, since it requires a constant argument which
3280       // isn't also required to be a simple ConstantInt.
3281       if (CB.getIntrinsicID() == Intrinsic::gcroot)
3282         return false;
3283 
3284       // Some intrinsic operands are required to be immediates.
3285       return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3286     }
3287 
    // It is never allowed to replace the callee operand of an intrinsic call,
    // but it may be possible for a normal call.
3290     return !isa<IntrinsicInst>(CB);
3291   }
3292   case Instruction::ShuffleVector:
3293     // Shufflevector masks are constant.
3294     return OpIdx != 2;
3295   case Instruction::Switch:
3296   case Instruction::ExtractValue:
3297     // All operands apart from the first are constant.
3298     return OpIdx == 0;
3299   case Instruction::InsertValue:
3300     // All operands apart from the first and the second are constant.
3301     return OpIdx < 2;
3302   case Instruction::Alloca:
3303     // Static allocas (constant size in the entry block) are handled by
3304     // prologue/epilogue insertion so they're free anyway. We definitely don't
3305     // want to make them non-constant.
3306     return !cast<AllocaInst>(I)->isStaticAlloca();
3307   case Instruction::GetElementPtr:
3308     if (OpIdx == 0)
3309       return true;
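    // Indices into struct types must stay constant; only indices into
    // sequential (array/vector) types may be replaced with a variable.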
3310     gep_type_iterator It = gep_type_begin(I);
3311     for (auto E = std::next(It, OpIdx); It != E; ++It)
3312       if (It.isStruct())
3313         return false;
3314     return true;
3315   }
3316 }
3317 
3318 Value *llvm::invertCondition(Value *Condition) {
3319   // First: Check if it's a constant
3320   if (Constant *C = dyn_cast<Constant>(Condition))
3321     return ConstantExpr::getNot(C);
3322 
3323   // Second: If the condition is already inverted, return the original value
3324   Value *NotCondition;
3325   if (match(Condition, m_Not(m_Value(NotCondition))))
3326     return NotCondition;
3327 
3328   BasicBlock *Parent = nullptr;
3329   Instruction *Inst = dyn_cast<Instruction>(Condition);
3330   if (Inst)
3331     Parent = Inst->getParent();
3332   else if (Argument *Arg = dyn_cast<Argument>(Condition))
3333     Parent = &Arg->getParent()->getEntryBlock();
3334   assert(Parent && "Unsupported condition to invert");
3335 
3336   // Third: Check all the users for an invert
3337   for (User *U : Condition->users())
3338     if (Instruction *I = dyn_cast<Instruction>(U))
3339       if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
3340         return I;
3341 
3342   // Last option: Create a new instruction
3343   auto *Inverted =
3344       BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
3345   if (Inst && !isa<PHINode>(Inst))
3346     Inverted->insertAfter(Inst);
3347   else
3348     Inverted->insertBefore(&*Parent->getFirstInsertionPt());
3349   return Inverted;
3350 }
3351 
3352 bool llvm::inferAttributesFromOthers(Function &F) {
3353   // Note: We explicitly check for attributes rather than using cover functions
3354   // because some of the cover functions include the logic being implemented.
3355 
3356   bool Changed = false;
3357   // readnone + not convergent implies nosync
3358   if (!F.hasFnAttribute(Attribute::NoSync) &&
3359       F.doesNotAccessMemory() && !F.isConvergent()) {
3360     F.setNoSync();
3361     Changed = true;
3362   }
3363 
3364   // readonly implies nofree
3365   if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
3366     F.setDoesNotFreeMemory();
3367     Changed = true;
3368   }
3369 
3370   // willreturn implies mustprogress
3371   if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
3372     F.setMustProgress();
3373     Changed = true;
3374   }
3375 
3376   // TODO: There are a bunch of cases of restrictive memory effects we
3377   // can infer by inspecting arguments of argmemonly-ish functions.
3378 
3379   return Changed;
3380 }
3381