1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This family of functions perform various local transformations to the
10 // program.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/DomTreeUpdater.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/InstructionSimplify.h"
32 #include "llvm/Analysis/LazyValueInfo.h"
33 #include "llvm/Analysis/MemoryBuiltins.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/Analysis/VectorUtils.h"
38 #include "llvm/BinaryFormat/Dwarf.h"
39 #include "llvm/IR/Argument.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/IR/CFG.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/ConstantRange.h"
45 #include "llvm/IR/Constants.h"
46 #include "llvm/IR/DIBuilder.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfoMetadata.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/DerivedTypes.h"
51 #include "llvm/IR/Dominators.h"
52 #include "llvm/IR/Function.h"
53 #include "llvm/IR/GetElementPtrTypeIterator.h"
54 #include "llvm/IR/GlobalObject.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InstrTypes.h"
57 #include "llvm/IR/Instruction.h"
58 #include "llvm/IR/Instructions.h"
59 #include "llvm/IR/IntrinsicInst.h"
60 #include "llvm/IR/Intrinsics.h"
61 #include "llvm/IR/LLVMContext.h"
62 #include "llvm/IR/MDBuilder.h"
63 #include "llvm/IR/Metadata.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/Operator.h"
66 #include "llvm/IR/PatternMatch.h"
67 #include "llvm/IR/PseudoProbe.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/KnownBits.h"
77 #include "llvm/Support/raw_ostream.h"
78 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
79 #include "llvm/Transforms/Utils/ValueMapper.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <climits>
83 #include <cstdint>
84 #include <iterator>
85 #include <map>
86 #include <utility>
87 
88 using namespace llvm;
89 using namespace llvm::PatternMatch;
90 
91 #define DEBUG_TYPE "local"
92 
93 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
95 
96 static cl::opt<bool> PHICSEDebugHash(
97     "phicse-debug-hash",
98 #ifdef EXPENSIVE_CHECKS
99     cl::init(true),
100 #else
101     cl::init(false),
102 #endif
103     cl::Hidden,
104     cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
105              "function is well-behaved w.r.t. its isEqual predicate"));
106 
107 static cl::opt<unsigned> PHICSENumPHISmallSize(
108     "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109     cl::desc(
110         "When the basic block contains not more than this number of PHI nodes, "
111         "perform a (faster!) exhaustive search instead of set-driven one."));
112 
113 // Max recursion depth for collectBitParts used when detecting bswap and
114 // bitreverse idioms.
115 static const unsigned BitPartRecursionMaxDepth = 48;
116 
117 //===----------------------------------------------------------------------===//
118 //  Local constant propagation.
119 //
120 
121 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
122 /// constant value, convert it into an unconditional branch to the constant
123 /// destination.  This is a nontrivial operation because the successors of this
124 /// basic block must have their PHI nodes updated.
125 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
126 /// conditions and indirectbr addresses this might make dead if
127 /// DeleteDeadConditions is true.
128 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
129                                   const TargetLibraryInfo *TLI,
130                                   DomTreeUpdater *DTU) {
131   Instruction *T = BB->getTerminator();
132   IRBuilder<> Builder(T);
133 
134   // Branch - See if we are conditional jumping on constant
135   if (auto *BI = dyn_cast<BranchInst>(T)) {
136     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
137 
138     BasicBlock *Dest1 = BI->getSuccessor(0);
139     BasicBlock *Dest2 = BI->getSuccessor(1);
140 
141     if (Dest2 == Dest1) {       // Conditional branch to same location?
142       // This branch matches something like this:
143       //     br bool %cond, label %Dest, label %Dest
144       // and changes it into:  br label %Dest
145 
146       // Let the basic block know that we are letting go of one copy of it.
147       assert(BI->getParent() && "Terminator not inserted in block!");
148       Dest1->removePredecessor(BI->getParent());
149 
150       // Replace the conditional branch with an unconditional one.
151       BranchInst *NewBI = Builder.CreateBr(Dest1);
152 
153       // Transfer the metadata to the new branch instruction.
154       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
155                                 LLVMContext::MD_annotation});
156 
157       Value *Cond = BI->getCondition();
158       BI->eraseFromParent();
159       if (DeleteDeadConditions)
160         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
161       return true;
162     }
163 
164     if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
165       // Are we branching on constant?
166       // YES.  Change to unconditional branch...
167       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
168       BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
169 
170       // Let the basic block know that we are letting go of it.  Based on this,
171       // it will adjust it's PHI nodes.
172       OldDest->removePredecessor(BB);
173 
174       // Replace the conditional branch with an unconditional one.
175       BranchInst *NewBI = Builder.CreateBr(Destination);
176 
177       // Transfer the metadata to the new branch instruction.
178       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
179                                 LLVMContext::MD_annotation});
180 
181       BI->eraseFromParent();
182       if (DTU)
183         DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
184       return true;
185     }
186 
187     return false;
188   }
189 
190   if (auto *SI = dyn_cast<SwitchInst>(T)) {
191     // If we are switching on a constant, we can convert the switch to an
192     // unconditional branch.
193     auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
194     BasicBlock *DefaultDest = SI->getDefaultDest();
195     BasicBlock *TheOnlyDest = DefaultDest;
196 
197     // If the default is unreachable, ignore it when searching for TheOnlyDest.
198     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
199         SI->getNumCases() > 0) {
200       TheOnlyDest = SI->case_begin()->getCaseSuccessor();
201     }
202 
203     bool Changed = false;
204 
205     // Figure out which case it goes to.
206     for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
207       // Found case matching a constant operand?
208       if (i->getCaseValue() == CI) {
209         TheOnlyDest = i->getCaseSuccessor();
210         break;
211       }
212 
213       // Check to see if this branch is going to the same place as the default
214       // dest.  If so, eliminate it as an explicit compare.
215       if (i->getCaseSuccessor() == DefaultDest) {
216         MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
217         unsigned NCases = SI->getNumCases();
218         // Fold the case metadata into the default if there will be any branches
219         // left, unless the metadata doesn't match the switch.
220         if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
221           // Collect branch weights into a vector.
222           SmallVector<uint32_t, 8> Weights;
223           for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
224                ++MD_i) {
225             auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
226             Weights.push_back(CI->getValue().getZExtValue());
227           }
228           // Merge weight of this case to the default weight.
229           unsigned idx = i->getCaseIndex();
230           Weights[0] += Weights[idx+1];
231           // Remove weight for this case.
232           std::swap(Weights[idx+1], Weights.back());
233           Weights.pop_back();
234           SI->setMetadata(LLVMContext::MD_prof,
235                           MDBuilder(BB->getContext()).
236                           createBranchWeights(Weights));
237         }
238         // Remove this entry.
239         BasicBlock *ParentBB = SI->getParent();
240         DefaultDest->removePredecessor(ParentBB);
241         i = SI->removeCase(i);
242         e = SI->case_end();
243         Changed = true;
244         continue;
245       }
246 
247       // Otherwise, check to see if the switch only branches to one destination.
248       // We do this by reseting "TheOnlyDest" to null when we find two non-equal
249       // destinations.
250       if (i->getCaseSuccessor() != TheOnlyDest)
251         TheOnlyDest = nullptr;
252 
253       // Increment this iterator as we haven't removed the case.
254       ++i;
255     }
256 
257     if (CI && !TheOnlyDest) {
258       // Branching on a constant, but not any of the cases, go to the default
259       // successor.
260       TheOnlyDest = SI->getDefaultDest();
261     }
262 
263     // If we found a single destination that we can fold the switch into, do so
264     // now.
265     if (TheOnlyDest) {
266       // Insert the new branch.
267       Builder.CreateBr(TheOnlyDest);
268       BasicBlock *BB = SI->getParent();
269 
270       SmallSet<BasicBlock *, 8> RemovedSuccessors;
271 
272       // Remove entries from PHI nodes which we no longer branch to...
273       BasicBlock *SuccToKeep = TheOnlyDest;
274       for (BasicBlock *Succ : successors(SI)) {
275         if (DTU && Succ != TheOnlyDest)
276           RemovedSuccessors.insert(Succ);
277         // Found case matching a constant operand?
278         if (Succ == SuccToKeep) {
279           SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
280         } else {
281           Succ->removePredecessor(BB);
282         }
283       }
284 
285       // Delete the old switch.
286       Value *Cond = SI->getCondition();
287       SI->eraseFromParent();
288       if (DeleteDeadConditions)
289         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
290       if (DTU) {
291         std::vector<DominatorTree::UpdateType> Updates;
292         Updates.reserve(RemovedSuccessors.size());
293         for (auto *RemovedSuccessor : RemovedSuccessors)
294           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
295         DTU->applyUpdates(Updates);
296       }
297       return true;
298     }
299 
300     if (SI->getNumCases() == 1) {
301       // Otherwise, we can fold this switch into a conditional branch
302       // instruction if it has only one non-default destination.
303       auto FirstCase = *SI->case_begin();
304       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
305           FirstCase.getCaseValue(), "cond");
306 
307       // Insert the new branch.
308       BranchInst *NewBr = Builder.CreateCondBr(Cond,
309                                                FirstCase.getCaseSuccessor(),
310                                                SI->getDefaultDest());
311       MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
312       if (MD && MD->getNumOperands() == 3) {
313         ConstantInt *SICase =
314             mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
315         ConstantInt *SIDef =
316             mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
317         assert(SICase && SIDef);
318         // The TrueWeight should be the weight for the single case of SI.
319         NewBr->setMetadata(LLVMContext::MD_prof,
320                         MDBuilder(BB->getContext()).
321                         createBranchWeights(SICase->getValue().getZExtValue(),
322                                             SIDef->getValue().getZExtValue()));
323       }
324 
325       // Update make.implicit metadata to the newly-created conditional branch.
326       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
327       if (MakeImplicitMD)
328         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
329 
330       // Delete the old switch.
331       SI->eraseFromParent();
332       return true;
333     }
334     return Changed;
335   }
336 
337   if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
338     // indirectbr blockaddress(@F, @BB) -> br label @BB
339     if (auto *BA =
340           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
341       BasicBlock *TheOnlyDest = BA->getBasicBlock();
342       SmallSet<BasicBlock *, 8> RemovedSuccessors;
343 
344       // Insert the new branch.
345       Builder.CreateBr(TheOnlyDest);
346 
347       BasicBlock *SuccToKeep = TheOnlyDest;
348       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
349         BasicBlock *DestBB = IBI->getDestination(i);
350         if (DTU && DestBB != TheOnlyDest)
351           RemovedSuccessors.insert(DestBB);
352         if (IBI->getDestination(i) == SuccToKeep) {
353           SuccToKeep = nullptr;
354         } else {
355           DestBB->removePredecessor(BB);
356         }
357       }
358       Value *Address = IBI->getAddress();
359       IBI->eraseFromParent();
360       if (DeleteDeadConditions)
361         // Delete pointer cast instructions.
362         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
363 
364       // Also zap the blockaddress constant if there are no users remaining,
365       // otherwise the destination is still marked as having its address taken.
366       if (BA->use_empty())
367         BA->destroyConstant();
368 
369       // If we didn't find our destination in the IBI successor list, then we
370       // have undefined behavior.  Replace the unconditional branch with an
371       // 'unreachable' instruction.
372       if (SuccToKeep) {
373         BB->getTerminator()->eraseFromParent();
374         new UnreachableInst(BB->getContext(), BB);
375       }
376 
377       if (DTU) {
378         std::vector<DominatorTree::UpdateType> Updates;
379         Updates.reserve(RemovedSuccessors.size());
380         for (auto *RemovedSuccessor : RemovedSuccessors)
381           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
382         DTU->applyUpdates(Updates);
383       }
384       return true;
385     }
386   }
387 
388   return false;
389 }
390 
391 //===----------------------------------------------------------------------===//
392 //  Local dead code elimination.
393 //
394 
395 /// isInstructionTriviallyDead - Return true if the result produced by the
396 /// instruction is not used, and the instruction has no side effects.
397 ///
398 bool llvm::isInstructionTriviallyDead(Instruction *I,
399                                       const TargetLibraryInfo *TLI) {
400   if (!I->use_empty())
401     return false;
402   return wouldInstructionBeTriviallyDead(I, TLI);
403 }
404 
405 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
406                                            const TargetLibraryInfo *TLI) {
407   if (I->isTerminator())
408     return false;
409 
410   // We don't want the landingpad-like instructions removed by anything this
411   // general.
412   if (I->isEHPad())
413     return false;
414 
415   // We don't want debug info removed by anything this general, unless
416   // debug info is empty.
417   if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
418     if (DDI->getAddress())
419       return false;
420     return true;
421   }
422   if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
423     if (DVI->hasArgList() || DVI->getValue(0))
424       return false;
425     return true;
426   }
427   if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
428     if (DLI->getLabel())
429       return false;
430     return true;
431   }
432 
433   if (!I->willReturn())
434     return false;
435 
436   if (!I->mayHaveSideEffects())
437     return true;
438 
439   // Special case intrinsics that "may have side effects" but can be deleted
440   // when dead.
441   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
442     // Safe to delete llvm.stacksave and launder.invariant.group if dead.
443     if (II->getIntrinsicID() == Intrinsic::stacksave ||
444         II->getIntrinsicID() == Intrinsic::launder_invariant_group)
445       return true;
446 
447     if (II->isLifetimeStartOrEnd()) {
448       auto *Arg = II->getArgOperand(1);
449       // Lifetime intrinsics are dead when their right-hand is undef.
450       if (isa<UndefValue>(Arg))
451         return true;
452       // If the right-hand is an alloc, global, or argument and the only uses
453       // are lifetime intrinsics then the intrinsics are dead.
454       if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
455         return llvm::all_of(Arg->uses(), [](Use &Use) {
456           if (IntrinsicInst *IntrinsicUse =
457                   dyn_cast<IntrinsicInst>(Use.getUser()))
458             return IntrinsicUse->isLifetimeStartOrEnd();
459           return false;
460         });
461       return false;
462     }
463 
464     // Assumptions are dead if their condition is trivially true.  Guards on
465     // true are operationally no-ops.  In the future we can consider more
466     // sophisticated tradeoffs for guards considering potential for check
467     // widening, but for now we keep things simple.
468     if ((II->getIntrinsicID() == Intrinsic::assume &&
469          isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) ||
470         II->getIntrinsicID() == Intrinsic::experimental_guard) {
471       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
472         return !Cond->isZero();
473 
474       return false;
475     }
476 
477     if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
478       Optional<fp::ExceptionBehavior> ExBehavior = FPI->getExceptionBehavior();
479       return ExBehavior.getValue() != fp::ebStrict;
480     }
481   }
482 
483   if (isAllocLikeFn(I, TLI))
484     return true;
485 
486   if (CallInst *CI = isFreeCall(I, TLI))
487     if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
488       return C->isNullValue() || isa<UndefValue>(C);
489 
490   if (auto *Call = dyn_cast<CallBase>(I))
491     if (isMathLibCallNoop(Call, TLI))
492       return true;
493 
494   // To express possible interaction with floating point environment constrained
495   // intrinsics are described as if they access memory. So they look like having
496   // side effect but actually do not have it unless they raise floating point
497   // exception. If FP exceptions are ignored, the intrinsic may be deleted.
498   if (auto *CI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
499     Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
500     if (!EB || *EB == fp::ExceptionBehavior::ebIgnore)
501       return true;
502   }
503 
504   return false;
505 }
506 
507 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
508 /// trivially dead instruction, delete it.  If that makes any of its operands
509 /// trivially dead, delete them too, recursively.  Return true if any
510 /// instructions were deleted.
511 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
512     Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
513     std::function<void(Value *)> AboutToDeleteCallback) {
514   Instruction *I = dyn_cast<Instruction>(V);
515   if (!I || !isInstructionTriviallyDead(I, TLI))
516     return false;
517 
518   SmallVector<WeakTrackingVH, 16> DeadInsts;
519   DeadInsts.push_back(I);
520   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
521                                              AboutToDeleteCallback);
522 
523   return true;
524 }
525 
526 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
527     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
528     MemorySSAUpdater *MSSAU,
529     std::function<void(Value *)> AboutToDeleteCallback) {
530   unsigned S = 0, E = DeadInsts.size(), Alive = 0;
531   for (; S != E; ++S) {
532     auto *I = cast<Instruction>(DeadInsts[S]);
533     if (!isInstructionTriviallyDead(I)) {
534       DeadInsts[S] = nullptr;
535       ++Alive;
536     }
537   }
538   if (Alive == E)
539     return false;
540   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
541                                              AboutToDeleteCallback);
542   return true;
543 }
544 
545 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
546     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
547     MemorySSAUpdater *MSSAU,
548     std::function<void(Value *)> AboutToDeleteCallback) {
549   // Process the dead instruction list until empty.
550   while (!DeadInsts.empty()) {
551     Value *V = DeadInsts.pop_back_val();
552     Instruction *I = cast_or_null<Instruction>(V);
553     if (!I)
554       continue;
555     assert(isInstructionTriviallyDead(I, TLI) &&
556            "Live instruction found in dead worklist!");
557     assert(I->use_empty() && "Instructions with uses are not dead.");
558 
559     // Don't lose the debug info while deleting the instructions.
560     salvageDebugInfo(*I);
561 
562     if (AboutToDeleteCallback)
563       AboutToDeleteCallback(I);
564 
565     // Null out all of the instruction's operands to see if any operand becomes
566     // dead as we go.
567     for (Use &OpU : I->operands()) {
568       Value *OpV = OpU.get();
569       OpU.set(nullptr);
570 
571       if (!OpV->use_empty())
572         continue;
573 
574       // If the operand is an instruction that became dead as we nulled out the
575       // operand, and if it is 'trivially' dead, delete it in a future loop
576       // iteration.
577       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
578         if (isInstructionTriviallyDead(OpI, TLI))
579           DeadInsts.push_back(OpI);
580     }
581     if (MSSAU)
582       MSSAU->removeMemoryAccess(I);
583 
584     I->eraseFromParent();
585   }
586 }
587 
588 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
589   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
590   findDbgUsers(DbgUsers, I);
591   for (auto *DII : DbgUsers) {
592     Value *Undef = UndefValue::get(I->getType());
593     DII->replaceVariableLocationOp(I, Undef);
594   }
595   return !DbgUsers.empty();
596 }
597 
598 /// areAllUsesEqual - Check whether the uses of a value are all the same.
599 /// This is similar to Instruction::hasOneUse() except this will also return
600 /// true when there are no uses or multiple uses that all refer to the same
601 /// value.
602 static bool areAllUsesEqual(Instruction *I) {
603   Value::user_iterator UI = I->user_begin();
604   Value::user_iterator UE = I->user_end();
605   if (UI == UE)
606     return true;
607 
608   User *TheUse = *UI;
609   for (++UI; UI != UE; ++UI) {
610     if (*UI != TheUse)
611       return false;
612   }
613   return true;
614 }
615 
616 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
617 /// dead PHI node, due to being a def-use chain of single-use nodes that
618 /// either forms a cycle or is terminated by a trivially dead instruction,
619 /// delete it.  If that makes any of its operands trivially dead, delete them
620 /// too, recursively.  Return true if a change was made.
621 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
622                                         const TargetLibraryInfo *TLI,
623                                         llvm::MemorySSAUpdater *MSSAU) {
624   SmallPtrSet<Instruction*, 4> Visited;
625   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
626        I = cast<Instruction>(*I->user_begin())) {
627     if (I->use_empty())
628       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
629 
630     // If we find an instruction more than once, we're on a cycle that
631     // won't prove fruitful.
632     if (!Visited.insert(I).second) {
633       // Break the cycle and delete the instruction and its operands.
634       I->replaceAllUsesWith(UndefValue::get(I->getType()));
635       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
636       return true;
637     }
638   }
639   return false;
640 }
641 
642 static bool
643 simplifyAndDCEInstruction(Instruction *I,
644                           SmallSetVector<Instruction *, 16> &WorkList,
645                           const DataLayout &DL,
646                           const TargetLibraryInfo *TLI) {
647   if (isInstructionTriviallyDead(I, TLI)) {
648     salvageDebugInfo(*I);
649 
650     // Null out all of the instruction's operands to see if any operand becomes
651     // dead as we go.
652     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
653       Value *OpV = I->getOperand(i);
654       I->setOperand(i, nullptr);
655 
656       if (!OpV->use_empty() || I == OpV)
657         continue;
658 
659       // If the operand is an instruction that became dead as we nulled out the
660       // operand, and if it is 'trivially' dead, delete it in a future loop
661       // iteration.
662       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
663         if (isInstructionTriviallyDead(OpI, TLI))
664           WorkList.insert(OpI);
665     }
666 
667     I->eraseFromParent();
668 
669     return true;
670   }
671 
672   if (Value *SimpleV = SimplifyInstruction(I, DL)) {
673     // Add the users to the worklist. CAREFUL: an instruction can use itself,
674     // in the case of a phi node.
675     for (User *U : I->users()) {
676       if (U != I) {
677         WorkList.insert(cast<Instruction>(U));
678       }
679     }
680 
681     // Replace the instruction with its simplified value.
682     bool Changed = false;
683     if (!I->use_empty()) {
684       I->replaceAllUsesWith(SimpleV);
685       Changed = true;
686     }
687     if (isInstructionTriviallyDead(I, TLI)) {
688       I->eraseFromParent();
689       Changed = true;
690     }
691     return Changed;
692   }
693   return false;
694 }
695 
696 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
697 /// simplify any instructions in it and recursively delete dead instructions.
698 ///
699 /// This returns true if it changed the code, note that it can delete
700 /// instructions in other blocks as well in this block.
701 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
702                                        const TargetLibraryInfo *TLI) {
703   bool MadeChange = false;
704   const DataLayout &DL = BB->getModule()->getDataLayout();
705 
706 #ifndef NDEBUG
707   // In debug builds, ensure that the terminator of the block is never replaced
708   // or deleted by these simplifications. The idea of simplification is that it
709   // cannot introduce new instructions, and there is no way to replace the
710   // terminator of a block without introducing a new instruction.
711   AssertingVH<Instruction> TerminatorVH(&BB->back());
712 #endif
713 
714   SmallSetVector<Instruction *, 16> WorkList;
715   // Iterate over the original function, only adding insts to the worklist
716   // if they actually need to be revisited. This avoids having to pre-init
717   // the worklist with the entire function's worth of instructions.
718   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
719        BI != E;) {
720     assert(!BI->isTerminator());
721     Instruction *I = &*BI;
722     ++BI;
723 
724     // We're visiting this instruction now, so make sure it's not in the
725     // worklist from an earlier visit.
726     if (!WorkList.count(I))
727       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
728   }
729 
730   while (!WorkList.empty()) {
731     Instruction *I = WorkList.pop_back_val();
732     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
733   }
734   return MadeChange;
735 }
736 
737 //===----------------------------------------------------------------------===//
738 //  Control Flow Graph Restructuring.
739 //
740 
741 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
742                                        DomTreeUpdater *DTU) {
743 
744   // If BB has single-entry PHI nodes, fold them.
745   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
746     Value *NewVal = PN->getIncomingValue(0);
747     // Replace self referencing PHI with undef, it must be dead.
748     if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
749     PN->replaceAllUsesWith(NewVal);
750     PN->eraseFromParent();
751   }
752 
753   BasicBlock *PredBB = DestBB->getSinglePredecessor();
754   assert(PredBB && "Block doesn't have a single predecessor!");
755 
756   bool ReplaceEntryBB = PredBB->isEntryBlock();
757 
758   // DTU updates: Collect all the edges that enter
759   // PredBB. These dominator edges will be redirected to DestBB.
760   SmallVector<DominatorTree::UpdateType, 32> Updates;
761 
762   if (DTU) {
763     SmallPtrSet<BasicBlock *, 2> PredsOfPredBB(pred_begin(PredBB),
764                                                pred_end(PredBB));
765     Updates.reserve(Updates.size() + 2 * PredsOfPredBB.size() + 1);
766     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
767       // This predecessor of PredBB may already have DestBB as a successor.
768       if (PredOfPredBB != PredBB)
769         Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
770     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
771       Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
772     Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
773   }
774 
775   // Zap anything that took the address of DestBB.  Not doing this will give the
776   // address an invalid value.
777   if (DestBB->hasAddressTaken()) {
778     BlockAddress *BA = BlockAddress::get(DestBB);
779     Constant *Replacement =
780       ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
781     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
782                                                      BA->getType()));
783     BA->destroyConstant();
784   }
785 
786   // Anything that branched to PredBB now branches to DestBB.
787   PredBB->replaceAllUsesWith(DestBB);
788 
789   // Splice all the instructions from PredBB to DestBB.
790   PredBB->getTerminator()->eraseFromParent();
791   DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
792   new UnreachableInst(PredBB->getContext(), PredBB);
793 
794   // If the PredBB is the entry block of the function, move DestBB up to
795   // become the entry block after we erase PredBB.
796   if (ReplaceEntryBB)
797     DestBB->moveAfter(PredBB);
798 
799   if (DTU) {
800     assert(PredBB->getInstList().size() == 1 &&
801            isa<UnreachableInst>(PredBB->getTerminator()) &&
802            "The successor list of PredBB isn't empty before "
803            "applying corresponding DTU updates.");
804     DTU->applyUpdatesPermissive(Updates);
805     DTU->deleteBB(PredBB);
806     // Recalculation of DomTree is needed when updating a forward DomTree and
807     // the Entry BB is replaced.
808     if (ReplaceEntryBB && DTU->hasDomTree()) {
809       // The entry block was removed and there is no external interface for
810       // the dominator tree to be notified of this change. In this corner-case
811       // we recalculate the entire tree.
812       DTU->recalculate(*(DestBB->getParent()));
813     }
814   }
815 
816   else {
817     PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
818   }
819 }
820 
821 /// Return true if we can choose one of these values to use in place of the
822 /// other. Note that we will always choose the non-undef value to keep.
823 static bool CanMergeValues(Value *First, Value *Second) {
824   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
825 }
826 
827 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
828 /// branch to Succ, into Succ.
829 ///
830 /// Assumption: Succ is the single successor for BB.
831 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
832   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
833 
834   LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
835                     << Succ->getName() << "\n");
836   // Shortcut, if there is only a single predecessor it must be BB and merging
837   // is always safe
838   if (Succ->getSinglePredecessor()) return true;
839 
840   // Make a list of the predecessors of BB
841   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
842 
843   // Look at all the phi nodes in Succ, to see if they present a conflict when
844   // merging these blocks
845   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
846     PHINode *PN = cast<PHINode>(I);
847 
848     // If the incoming value from BB is again a PHINode in
849     // BB which has the same incoming value for *PI as PN does, we can
850     // merge the phi nodes and then the blocks can still be merged
851     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
852     if (BBPN && BBPN->getParent() == BB) {
853       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
854         BasicBlock *IBB = PN->getIncomingBlock(PI);
855         if (BBPreds.count(IBB) &&
856             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
857                             PN->getIncomingValue(PI))) {
858           LLVM_DEBUG(dbgs()
859                      << "Can't fold, phi node " << PN->getName() << " in "
860                      << Succ->getName() << " is conflicting with "
861                      << BBPN->getName() << " with regard to common predecessor "
862                      << IBB->getName() << "\n");
863           return false;
864         }
865       }
866     } else {
867       Value* Val = PN->getIncomingValueForBlock(BB);
868       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
869         // See if the incoming value for the common predecessor is equal to the
870         // one for BB, in which case this phi node will not prevent the merging
871         // of the block.
872         BasicBlock *IBB = PN->getIncomingBlock(PI);
873         if (BBPreds.count(IBB) &&
874             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
875           LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
876                             << " in " << Succ->getName()
877                             << " is conflicting with regard to common "
878                             << "predecessor " << IBB->getName() << "\n");
879           return false;
880         }
881       }
882     }
883   }
884 
885   return true;
886 }
887 
888 using PredBlockVector = SmallVector<BasicBlock *, 16>;
889 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
890 
891 /// Determines the value to use as the phi node input for a block.
892 ///
893 /// Select between \p OldVal any value that we know flows from \p BB
894 /// to a particular phi on the basis of which one (if either) is not
895 /// undef. Update IncomingValues based on the selected value.
896 ///
897 /// \param OldVal The value we are considering selecting.
898 /// \param BB The block that the value flows in from.
899 /// \param IncomingValues A map from block-to-value for other phi inputs
900 /// that we have examined.
901 ///
902 /// \returns the selected value.
903 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
904                                           IncomingValueMap &IncomingValues) {
905   if (!isa<UndefValue>(OldVal)) {
906     assert((!IncomingValues.count(BB) ||
907             IncomingValues.find(BB)->second == OldVal) &&
908            "Expected OldVal to match incoming value from BB!");
909 
910     IncomingValues.insert(std::make_pair(BB, OldVal));
911     return OldVal;
912   }
913 
914   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
915   if (It != IncomingValues.end()) return It->second;
916 
917   return OldVal;
918 }
919 
920 /// Create a map from block to value for the operands of a
921 /// given phi.
922 ///
923 /// Create a map from block to value for each non-undef value flowing
924 /// into \p PN.
925 ///
926 /// \param PN The phi we are collecting the map for.
927 /// \param IncomingValues [out] The map from block to value for this phi.
928 static void gatherIncomingValuesToPhi(PHINode *PN,
929                                       IncomingValueMap &IncomingValues) {
930   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
931     BasicBlock *BB = PN->getIncomingBlock(i);
932     Value *V = PN->getIncomingValue(i);
933 
934     if (!isa<UndefValue>(V))
935       IncomingValues.insert(std::make_pair(BB, V));
936   }
937 }
938 
939 /// Replace the incoming undef values to a phi with the values
940 /// from a block-to-value map.
941 ///
942 /// \param PN The phi we are replacing the undefs in.
943 /// \param IncomingValues A map from block to value.
944 static void replaceUndefValuesInPhi(PHINode *PN,
945                                     const IncomingValueMap &IncomingValues) {
946   SmallVector<unsigned> TrueUndefOps;
947   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
948     Value *V = PN->getIncomingValue(i);
949 
950     if (!isa<UndefValue>(V)) continue;
951 
952     BasicBlock *BB = PN->getIncomingBlock(i);
953     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
954 
955     // Keep track of undef/poison incoming values. Those must match, so we fix
956     // them up below if needed.
957     // Note: this is conservatively correct, but we could try harder and group
958     // the undef values per incoming basic block.
959     if (It == IncomingValues.end()) {
960       TrueUndefOps.push_back(i);
961       continue;
962     }
963 
964     // There is a defined value for this incoming block, so map this undef
965     // incoming value to the defined value.
966     PN->setIncomingValue(i, It->second);
967   }
968 
969   // If there are both undef and poison values incoming, then convert those
970   // values to undef. It is invalid to have different values for the same
971   // incoming block.
972   unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
973     return isa<PoisonValue>(PN->getIncomingValue(i));
974   });
975   if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
976     for (unsigned i : TrueUndefOps)
977       PN->setIncomingValue(i, UndefValue::get(PN->getType()));
978   }
979 }
980 
981 /// Replace a value flowing from a block to a phi with
982 /// potentially multiple instances of that value flowing from the
983 /// block's predecessors to the phi.
984 ///
985 /// \param BB The block with the value flowing into the phi.
986 /// \param BBPreds The predecessors of BB.
987 /// \param PN The phi that we are updating.
988 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
989                                                 const PredBlockVector &BBPreds,
990                                                 PHINode *PN) {
991   Value *OldVal = PN->removeIncomingValue(BB, false);
992   assert(OldVal && "No entry in PHI for Pred BB!");
993 
994   IncomingValueMap IncomingValues;
995 
996   // We are merging two blocks - BB, and the block containing PN - and
997   // as a result we need to redirect edges from the predecessors of BB
998   // to go to the block containing PN, and update PN
999   // accordingly. Since we allow merging blocks in the case where the
1000   // predecessor and successor blocks both share some predecessors,
1001   // and where some of those common predecessors might have undef
1002   // values flowing into PN, we want to rewrite those values to be
1003   // consistent with the non-undef values.
1004 
1005   gatherIncomingValuesToPhi(PN, IncomingValues);
1006 
1007   // If this incoming value is one of the PHI nodes in BB, the new entries
1008   // in the PHI node are the entries from the old PHI.
1009   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1010     PHINode *OldValPN = cast<PHINode>(OldVal);
1011     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1012       // Note that, since we are merging phi nodes and BB and Succ might
1013       // have common predecessors, we could end up with a phi node with
1014       // identical incoming branches. This will be cleaned up later (and
1015       // will trigger asserts if we try to clean it up now, without also
1016       // simplifying the corresponding conditional branch).
1017       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1018       Value *PredVal = OldValPN->getIncomingValue(i);
1019       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
1020                                                     IncomingValues);
1021 
1022       // And add a new incoming value for this predecessor for the
1023       // newly retargeted branch.
1024       PN->addIncoming(Selected, PredBB);
1025     }
1026   } else {
1027     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1028       // Update existing incoming values in PN for this
1029       // predecessor of BB.
1030       BasicBlock *PredBB = BBPreds[i];
1031       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
1032                                                     IncomingValues);
1033 
1034       // And add a new incoming value for this predecessor for the
1035       // newly retargeted branch.
1036       PN->addIncoming(Selected, PredBB);
1037     }
1038   }
1039 
1040   replaceUndefValuesInPhi(PN, IncomingValues);
1041 }
1042 
1043 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1044                                                    DomTreeUpdater *DTU) {
1045   assert(BB != &BB->getParent()->getEntryBlock() &&
1046          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1047 
1048   // We can't eliminate infinite loops.
1049   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1050   if (BB == Succ) return false;
1051 
1052   // Check to see if merging these blocks would cause conflicts for any of the
1053   // phi nodes in BB or Succ. If not, we can safely merge.
1054   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
1055 
1056   // Check for cases where Succ has multiple predecessors and a PHI node in BB
1057   // has uses which will not disappear when the PHI nodes are merged.  It is
1058   // possible to handle such cases, but difficult: it requires checking whether
1059   // BB dominates Succ, which is non-trivial to calculate in the case where
1060   // Succ has multiple predecessors.  Also, it requires checking whether
1061   // constructing the necessary self-referential PHI node doesn't introduce any
1062   // conflicts; this isn't too difficult, but the previous code for doing this
1063   // was incorrect.
1064   //
1065   // Note that if this check finds a live use, BB dominates Succ, so BB is
1066   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1067   // folding the branch isn't profitable in that case anyway.
1068   if (!Succ->getSinglePredecessor()) {
1069     BasicBlock::iterator BBI = BB->begin();
1070     while (isa<PHINode>(*BBI)) {
1071       for (Use &U : BBI->uses()) {
1072         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1073           if (PN->getIncomingBlock(U) != BB)
1074             return false;
1075         } else {
1076           return false;
1077         }
1078       }
1079       ++BBI;
1080     }
1081   }
1082 
1083   // We cannot fold the block if it's a branch to an already present callbr
1084   // successor because that creates duplicate successors.
1085   for (BasicBlock *PredBB : predecessors(BB)) {
1086     if (auto *CBI = dyn_cast<CallBrInst>(PredBB->getTerminator())) {
1087       if (Succ == CBI->getDefaultDest())
1088         return false;
1089       for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
1090         if (Succ == CBI->getIndirectDest(i))
1091           return false;
1092     }
1093   }
1094 
1095   LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1096 
1097   SmallVector<DominatorTree::UpdateType, 32> Updates;
1098   if (DTU) {
1099     // All predecessors of BB will be moved to Succ.
1100     SmallPtrSet<BasicBlock *, 8> PredsOfBB(pred_begin(BB), pred_end(BB));
1101     SmallPtrSet<BasicBlock *, 8> PredsOfSucc(pred_begin(Succ), pred_end(Succ));
1102     Updates.reserve(Updates.size() + 2 * PredsOfBB.size() + 1);
1103     for (auto *PredOfBB : PredsOfBB)
1104       // This predecessor of BB may already have Succ as a successor.
1105       if (!PredsOfSucc.contains(PredOfBB))
1106         Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1107     for (auto *PredOfBB : PredsOfBB)
1108       Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1109     Updates.push_back({DominatorTree::Delete, BB, Succ});
1110   }
1111 
1112   if (isa<PHINode>(Succ->begin())) {
1113     // If there is more than one pred of succ, and there are PHI nodes in
1114     // the successor, then we need to add incoming edges for the PHI nodes
1115     //
1116     const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
1117 
1118     // Loop over all of the PHI nodes in the successor of BB.
1119     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1120       PHINode *PN = cast<PHINode>(I);
1121 
1122       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1123     }
1124   }
1125 
1126   if (Succ->getSinglePredecessor()) {
1127     // BB is the only predecessor of Succ, so Succ will end up with exactly
1128     // the same predecessors BB had.
1129 
1130     // Copy over any phi, debug or lifetime instruction.
1131     BB->getTerminator()->eraseFromParent();
1132     Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1133                                BB->getInstList());
1134   } else {
1135     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1136       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1137       assert(PN->use_empty() && "There shouldn't be any uses here!");
1138       PN->eraseFromParent();
1139     }
1140   }
1141 
1142   // If the unconditional branch we replaced contains llvm.loop metadata, we
1143   // add the metadata to the branch instructions in the predecessors.
1144   unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1145   Instruction *TI = BB->getTerminator();
1146   if (TI)
1147     if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1148       for (BasicBlock *Pred : predecessors(BB))
1149         Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1150 
1151   // Everything that jumped to BB now goes to Succ.
1152   BB->replaceAllUsesWith(Succ);
1153   if (!Succ->hasName()) Succ->takeName(BB);
1154 
1155   // Clear the successor list of BB to match updates applying to DTU later.
1156   if (BB->getTerminator())
1157     BB->getInstList().pop_back();
1158   new UnreachableInst(BB->getContext(), BB);
1159   assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1160                            "applying corresponding DTU updates.");
1161 
1162   if (DTU)
1163     DTU->applyUpdates(Updates);
1164 
1165   DeleteDeadBlock(BB, DTU);
1166 
1167   return true;
1168 }
1169 
1170 static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) {
1171   // This implementation doesn't currently consider undef operands
1172   // specially. Theoretically, two phis which are identical except for
1173   // one having an undef where the other doesn't could be collapsed.
1174 
1175   bool Changed = false;
1176 
1177   // Examine each PHI.
1178   // Note that increment of I must *NOT* be in the iteration_expression, since
1179   // we don't want to immediately advance when we restart from the beginning.
1180   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1181     ++I;
1182     // Is there an identical PHI node in this basic block?
1183     // Note that we only look in the upper square's triangle,
1184     // we already checked that the lower triangle PHI's aren't identical.
1185     for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1186       if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1187         continue;
1188       // A duplicate. Replace this PHI with the base PHI.
1189       ++NumPHICSEs;
1190       DuplicatePN->replaceAllUsesWith(PN);
1191       DuplicatePN->eraseFromParent();
1192       Changed = true;
1193 
1194       // The RAUW can change PHIs that we already visited.
1195       I = BB->begin();
1196       break; // Start over from the beginning.
1197     }
1198   }
1199   return Changed;
1200 }
1201 
1202 static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) {
1203   // This implementation doesn't currently consider undef operands
1204   // specially. Theoretically, two phis which are identical except for
1205   // one having an undef where the other doesn't could be collapsed.
1206 
1207   struct PHIDenseMapInfo {
1208     static PHINode *getEmptyKey() {
1209       return DenseMapInfo<PHINode *>::getEmptyKey();
1210     }
1211 
1212     static PHINode *getTombstoneKey() {
1213       return DenseMapInfo<PHINode *>::getTombstoneKey();
1214     }
1215 
1216     static bool isSentinel(PHINode *PN) {
1217       return PN == getEmptyKey() || PN == getTombstoneKey();
1218     }
1219 
1220     // WARNING: this logic must be kept in sync with
1221     //          Instruction::isIdenticalToWhenDefined()!
1222     static unsigned getHashValueImpl(PHINode *PN) {
1223       // Compute a hash value on the operands. Instcombine will likely have
1224       // sorted them, which helps expose duplicates, but we have to check all
1225       // the operands to be safe in case instcombine hasn't run.
1226       return static_cast<unsigned>(hash_combine(
1227           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1228           hash_combine_range(PN->block_begin(), PN->block_end())));
1229     }
1230 
1231     static unsigned getHashValue(PHINode *PN) {
1232 #ifndef NDEBUG
1233       // If -phicse-debug-hash was specified, return a constant -- this
1234       // will force all hashing to collide, so we'll exhaustively search
1235       // the table for a match, and the assertion in isEqual will fire if
1236       // there's a bug causing equal keys to hash differently.
1237       if (PHICSEDebugHash)
1238         return 0;
1239 #endif
1240       return getHashValueImpl(PN);
1241     }
1242 
1243     static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1244       if (isSentinel(LHS) || isSentinel(RHS))
1245         return LHS == RHS;
1246       return LHS->isIdenticalTo(RHS);
1247     }
1248 
1249     static bool isEqual(PHINode *LHS, PHINode *RHS) {
1250       // These comparisons are nontrivial, so assert that equality implies
1251       // hash equality (DenseMap demands this as an invariant).
1252       bool Result = isEqualImpl(LHS, RHS);
1253       assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1254              getHashValueImpl(LHS) == getHashValueImpl(RHS));
1255       return Result;
1256     }
1257   };
1258 
1259   // Set of unique PHINodes.
1260   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1261   PHISet.reserve(4 * PHICSENumPHISmallSize);
1262 
1263   // Examine each PHI.
1264   bool Changed = false;
1265   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1266     auto Inserted = PHISet.insert(PN);
1267     if (!Inserted.second) {
1268       // A duplicate. Replace this PHI with its duplicate.
1269       ++NumPHICSEs;
1270       PN->replaceAllUsesWith(*Inserted.first);
1271       PN->eraseFromParent();
1272       Changed = true;
1273 
1274       // The RAUW can change PHIs that we already visited. Start over from the
1275       // beginning.
1276       PHISet.clear();
1277       I = BB->begin();
1278     }
1279   }
1280 
1281   return Changed;
1282 }
1283 
1284 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1285   if (
1286 #ifndef NDEBUG
1287       !PHICSEDebugHash &&
1288 #endif
1289       hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1290     return EliminateDuplicatePHINodesNaiveImpl(BB);
1291   return EliminateDuplicatePHINodesSetBasedImpl(BB);
1292 }
1293 
1294 /// If the specified pointer points to an object that we control, try to modify
1295 /// the object's alignment to PrefAlign. Returns a minimum known alignment of
1296 /// the value after the operation, which may be lower than PrefAlign.
1297 ///
1298 /// Increating value alignment isn't often possible though. If alignment is
1299 /// important, a more reliable approach is to simply align all global variables
1300 /// and allocation instructions to their preferred alignment from the beginning.
1301 static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1302                                  const DataLayout &DL) {
1303   V = V->stripPointerCasts();
1304 
1305   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1306     // TODO: Ideally, this function would not be called if PrefAlign is smaller
1307     // than the current alignment, as the known bits calculation should have
1308     // already taken it into account. However, this is not always the case,
1309     // as computeKnownBits() has a depth limit, while stripPointerCasts()
1310     // doesn't.
1311     Align CurrentAlign = AI->getAlign();
1312     if (PrefAlign <= CurrentAlign)
1313       return CurrentAlign;
1314 
1315     // If the preferred alignment is greater than the natural stack alignment
1316     // then don't round up. This avoids dynamic stack realignment.
1317     if (DL.exceedsNaturalStackAlignment(PrefAlign))
1318       return CurrentAlign;
1319     AI->setAlignment(PrefAlign);
1320     return PrefAlign;
1321   }
1322 
1323   if (auto *GO = dyn_cast<GlobalObject>(V)) {
1324     // TODO: as above, this shouldn't be necessary.
1325     Align CurrentAlign = GO->getPointerAlignment(DL);
1326     if (PrefAlign <= CurrentAlign)
1327       return CurrentAlign;
1328 
1329     // If there is a large requested alignment and we can, bump up the alignment
1330     // of the global.  If the memory we set aside for the global may not be the
1331     // memory used by the final program then it is impossible for us to reliably
1332     // enforce the preferred alignment.
1333     if (!GO->canIncreaseAlignment())
1334       return CurrentAlign;
1335 
1336     GO->setAlignment(PrefAlign);
1337     return PrefAlign;
1338   }
1339 
1340   return Align(1);
1341 }
1342 
1343 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1344                                        const DataLayout &DL,
1345                                        const Instruction *CxtI,
1346                                        AssumptionCache *AC,
1347                                        const DominatorTree *DT) {
1348   assert(V->getType()->isPointerTy() &&
1349          "getOrEnforceKnownAlignment expects a pointer!");
1350 
1351   KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1352   unsigned TrailZ = Known.countMinTrailingZeros();
1353 
1354   // Avoid trouble with ridiculously large TrailZ values, such as
1355   // those computed from a null pointer.
1356   // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1357   TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1358 
1359   Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1360 
1361   if (PrefAlign && *PrefAlign > Alignment)
1362     Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1363 
1364   // We don't need to make any adjustment.
1365   return Alignment;
1366 }
1367 
1368 ///===---------------------------------------------------------------------===//
1369 ///  Dbg Intrinsic utilities
1370 ///
1371 
1372 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1373 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1374                              DIExpression *DIExpr,
1375                              PHINode *APN) {
1376   // Since we can't guarantee that the original dbg.declare instrinsic
1377   // is removed by LowerDbgDeclare(), we need to make sure that we are
1378   // not inserting the same dbg.value intrinsic over and over.
1379   SmallVector<DbgValueInst *, 1> DbgValues;
1380   findDbgValues(DbgValues, APN);
1381   for (auto *DVI : DbgValues) {
1382     assert(is_contained(DVI->getValues(), APN));
1383     if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1384       return true;
1385   }
1386   return false;
1387 }
1388 
1389 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1390 /// (or fragment of the variable) described by \p DII.
1391 ///
1392 /// This is primarily intended as a helper for the different
1393 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1394 /// converted describes an alloca'd variable, so we need to use the
1395 /// alloc size of the value when doing the comparison. E.g. an i1 value will be
1396 /// identified as covering an n-bit fragment, if the store size of i1 is at
1397 /// least n bits.
1398 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1399   const DataLayout &DL = DII->getModule()->getDataLayout();
1400   TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1401   if (Optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
1402     assert(!ValueSize.isScalable() &&
1403            "Fragments don't work on scalable types.");
1404     return ValueSize.getFixedSize() >= *FragmentSize;
1405   }
1406   // We can't always calculate the size of the DI variable (e.g. if it is a
1407   // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1408   // intead.
1409   if (DII->isAddressOfVariable()) {
1410     // DII should have exactly 1 location when it is an address.
1411     assert(DII->getNumVariableLocationOps() == 1 &&
1412            "address of variable must have exactly 1 location operand.");
1413     if (auto *AI =
1414             dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1415       if (Optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1416         assert(ValueSize.isScalable() == FragmentSize->isScalable() &&
1417                "Both sizes should agree on the scalable flag.");
1418         return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1419       }
1420     }
1421   }
1422   // Could not determine size of variable. Conservatively return false.
1423   return false;
1424 }
1425 
1426 /// Produce a DebugLoc to use for each dbg.declare/inst pair that are promoted
1427 /// to a dbg.value. Because no machine insts can come from debug intrinsics,
1428 /// only the scope and inlinedAt is significant. Zero line numbers are used in
1429 /// case this DebugLoc leaks into any adjacent instructions.
1430 static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
1431   // Original dbg.declare must have a location.
1432   const DebugLoc &DeclareLoc = DII->getDebugLoc();
1433   MDNode *Scope = DeclareLoc.getScope();
1434   DILocation *InlinedAt = DeclareLoc.getInlinedAt();
1435   // Produce an unknown location with the correct scope / inlinedAt fields.
1436   return DILocation::get(DII->getContext(), 0, 0, Scope, InlinedAt);
1437 }
1438 
1439 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1440 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1441 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1442                                            StoreInst *SI, DIBuilder &Builder) {
1443   assert(DII->isAddressOfVariable());
1444   auto *DIVar = DII->getVariable();
1445   assert(DIVar && "Missing variable");
1446   auto *DIExpr = DII->getExpression();
1447   Value *DV = SI->getValueOperand();
1448 
1449   DebugLoc NewLoc = getDebugValueLoc(DII, SI);
1450 
1451   if (!valueCoversEntireFragment(DV->getType(), DII)) {
1452     // FIXME: If storing to a part of the variable described by the dbg.declare,
1453     // then we want to insert a dbg.value for the corresponding fragment.
1454     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1455                       << *DII << '\n');
1456     // For now, when there is a store to parts of the variable (but we do not
1457     // know which part) we insert an dbg.value instrinsic to indicate that we
1458     // know nothing about the variable's content.
1459     DV = UndefValue::get(DV->getType());
1460     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1461     return;
1462   }
1463 
1464   Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1465 }
1466 
1467 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1468 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1469 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1470                                            LoadInst *LI, DIBuilder &Builder) {
1471   auto *DIVar = DII->getVariable();
1472   auto *DIExpr = DII->getExpression();
1473   assert(DIVar && "Missing variable");
1474 
1475   if (!valueCoversEntireFragment(LI->getType(), DII)) {
1476     // FIXME: If only referring to a part of the variable described by the
1477     // dbg.declare, then we want to insert a dbg.value for the corresponding
1478     // fragment.
1479     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1480                       << *DII << '\n');
1481     return;
1482   }
1483 
1484   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1485 
1486   // We are now tracking the loaded value instead of the address. In the
1487   // future if multi-location support is added to the IR, it might be
1488   // preferable to keep tracking both the loaded value and the original
1489   // address in case the alloca can not be elided.
1490   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1491       LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
1492   DbgValue->insertAfter(LI);
1493 }
1494 
1495 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1496 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1497 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1498                                            PHINode *APN, DIBuilder &Builder) {
1499   auto *DIVar = DII->getVariable();
1500   auto *DIExpr = DII->getExpression();
1501   assert(DIVar && "Missing variable");
1502 
1503   if (PhiHasDebugValue(DIVar, DIExpr, APN))
1504     return;
1505 
1506   if (!valueCoversEntireFragment(APN->getType(), DII)) {
1507     // FIXME: If only referring to a part of the variable described by the
1508     // dbg.declare, then we want to insert a dbg.value for the corresponding
1509     // fragment.
1510     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1511                       << *DII << '\n');
1512     return;
1513   }
1514 
1515   BasicBlock *BB = APN->getParent();
1516   auto InsertionPt = BB->getFirstInsertionPt();
1517 
1518   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1519 
1520   // The block may be a catchswitch block, which does not have a valid
1521   // insertion point.
1522   // FIXME: Insert dbg.value markers in the successors when appropriate.
1523   if (InsertionPt != BB->end())
1524     Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
1525 }
1526 
1527 /// Determine whether this alloca is either a VLA or an array.
1528 static bool isArray(AllocaInst *AI) {
1529   return AI->isArrayAllocation() ||
1530          (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1531 }
1532 
1533 /// Determine whether this alloca is a structure.
1534 static bool isStructure(AllocaInst *AI) {
1535   return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1536 }
1537 
1538 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1539 /// of llvm.dbg.value intrinsics.
1540 bool llvm::LowerDbgDeclare(Function &F) {
1541   bool Changed = false;
1542   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1543   SmallVector<DbgDeclareInst *, 4> Dbgs;
1544   for (auto &FI : F)
1545     for (Instruction &BI : FI)
1546       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1547         Dbgs.push_back(DDI);
1548 
1549   if (Dbgs.empty())
1550     return Changed;
1551 
1552   for (auto &I : Dbgs) {
1553     DbgDeclareInst *DDI = I;
1554     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1555     // If this is an alloca for a scalar variable, insert a dbg.value
1556     // at each load and store to the alloca and erase the dbg.declare.
1557     // The dbg.values allow tracking a variable even if it is not
1558     // stored on the stack, while the dbg.declare can only describe
1559     // the stack slot (and at a lexical-scope granularity). Later
1560     // passes will attempt to elide the stack slot.
1561     if (!AI || isArray(AI) || isStructure(AI))
1562       continue;
1563 
1564     // A volatile load/store means that the alloca can't be elided anyway.
1565     if (llvm::any_of(AI->users(), [](User *U) -> bool {
1566           if (LoadInst *LI = dyn_cast<LoadInst>(U))
1567             return LI->isVolatile();
1568           if (StoreInst *SI = dyn_cast<StoreInst>(U))
1569             return SI->isVolatile();
1570           return false;
1571         }))
1572       continue;
1573 
1574     SmallVector<const Value *, 8> WorkList;
1575     WorkList.push_back(AI);
1576     while (!WorkList.empty()) {
1577       const Value *V = WorkList.pop_back_val();
1578       for (auto &AIUse : V->uses()) {
1579         User *U = AIUse.getUser();
1580         if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1581           if (AIUse.getOperandNo() == 1)
1582             ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1583         } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1584           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1585         } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1586           // This is a call by-value or some other instruction that takes a
1587           // pointer to the variable. Insert a *value* intrinsic that describes
1588           // the variable by dereferencing the alloca.
1589           if (!CI->isLifetimeStartOrEnd()) {
1590             DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr);
1591             auto *DerefExpr =
1592                 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1593             DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1594                                         NewLoc, CI);
1595           }
1596         } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1597           if (BI->getType()->isPointerTy())
1598             WorkList.push_back(BI);
1599         }
1600       }
1601     }
1602     DDI->eraseFromParent();
1603     Changed = true;
1604   }
1605 
1606   if (Changed)
1607   for (BasicBlock &BB : F)
1608     RemoveRedundantDbgInstrs(&BB);
1609 
1610   return Changed;
1611 }
1612 
1613 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
1614 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1615                                     SmallVectorImpl<PHINode *> &InsertedPHIs) {
1616   assert(BB && "No BasicBlock to clone dbg.value(s) from.");
1617   if (InsertedPHIs.size() == 0)
1618     return;
1619 
1620   // Map existing PHI nodes to their dbg.values.
1621   ValueToValueMapTy DbgValueMap;
1622   for (auto &I : *BB) {
1623     if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1624       for (Value *V : DbgII->location_ops())
1625         if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1626           DbgValueMap.insert({Loc, DbgII});
1627     }
1628   }
1629   if (DbgValueMap.size() == 0)
1630     return;
1631 
1632   // Map a pair of the destination BB and old dbg.value to the new dbg.value,
1633   // so that if a dbg.value is being rewritten to use more than one of the
1634   // inserted PHIs in the same destination BB, we can update the same dbg.value
1635   // with all the new PHIs instead of creating one copy for each.
1636   MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
1637             DbgVariableIntrinsic *>
1638       NewDbgValueMap;
1639   // Then iterate through the new PHIs and look to see if they use one of the
1640   // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
1641   // propagate the info through the new PHI. If we use more than one new PHI in
1642   // a single destination BB with the same old dbg.value, merge the updates so
1643   // that we get a single new dbg.value with all the new PHIs.
1644   for (auto PHI : InsertedPHIs) {
1645     BasicBlock *Parent = PHI->getParent();
1646     // Avoid inserting an intrinsic into an EH block.
1647     if (Parent->getFirstNonPHI()->isEHPad())
1648       continue;
1649     for (auto VI : PHI->operand_values()) {
1650       auto V = DbgValueMap.find(VI);
1651       if (V != DbgValueMap.end()) {
1652         auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1653         auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1654         if (NewDI == NewDbgValueMap.end()) {
1655           auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
1656           NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1657         }
1658         DbgVariableIntrinsic *NewDbgII = NewDI->second;
1659         // If PHI contains VI as an operand more than once, we may
1660         // replaced it in NewDbgII; confirm that it is present.
1661         if (is_contained(NewDbgII->location_ops(), VI))
1662           NewDbgII->replaceVariableLocationOp(VI, PHI);
1663       }
1664     }
1665   }
1666   // Insert thew new dbg.values into their destination blocks.
1667   for (auto DI : NewDbgValueMap) {
1668     BasicBlock *Parent = DI.first.first;
1669     auto *NewDbgII = DI.second;
1670     auto InsertionPt = Parent->getFirstInsertionPt();
1671     assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1672     NewDbgII->insertBefore(&*InsertionPt);
1673   }
1674 }
1675 
1676 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1677                              DIBuilder &Builder, uint8_t DIExprFlags,
1678                              int Offset) {
1679   auto DbgAddrs = FindDbgAddrUses(Address);
1680   for (DbgVariableIntrinsic *DII : DbgAddrs) {
1681     const DebugLoc &Loc = DII->getDebugLoc();
1682     auto *DIVar = DII->getVariable();
1683     auto *DIExpr = DII->getExpression();
1684     assert(DIVar && "Missing variable");
1685     DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1686     // Insert llvm.dbg.declare immediately before DII, and remove old
1687     // llvm.dbg.declare.
1688     Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
1689     DII->eraseFromParent();
1690   }
1691   return !DbgAddrs.empty();
1692 }
1693 
1694 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1695                                         DIBuilder &Builder, int Offset) {
1696   const DebugLoc &Loc = DVI->getDebugLoc();
1697   auto *DIVar = DVI->getVariable();
1698   auto *DIExpr = DVI->getExpression();
1699   assert(DIVar && "Missing variable");
1700 
1701   // This is an alloca-based llvm.dbg.value. The first thing it should do with
1702   // the alloca pointer is dereference it. Otherwise we don't know how to handle
1703   // it and give up.
1704   if (!DIExpr || DIExpr->getNumElements() < 1 ||
1705       DIExpr->getElement(0) != dwarf::DW_OP_deref)
1706     return;
1707 
1708   // Insert the offset before the first deref.
1709   // We could just change the offset argument of dbg.value, but it's unsigned...
1710   if (Offset)
1711     DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1712 
1713   Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1714   DVI->eraseFromParent();
1715 }
1716 
1717 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1718                                     DIBuilder &Builder, int Offset) {
1719   if (auto *L = LocalAsMetadata::getIfExists(AI))
1720     if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1721       for (Use &U : llvm::make_early_inc_range(MDV->uses()))
1722         if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1723           replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1724 }
1725 
1726 /// Where possible to salvage debug information for \p I do so
1727 /// and return True. If not possible mark undef and return False.
1728 void llvm::salvageDebugInfo(Instruction &I) {
1729   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1730   findDbgUsers(DbgUsers, &I);
1731   salvageDebugInfoForDbgValues(I, DbgUsers);
1732 }
1733 
1734 void llvm::salvageDebugInfoForDbgValues(
1735     Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
1736   // This is an arbitrary chosen limit on the maximum number of values we can
1737   // salvage up to in a DIArgList, used for performance reasons.
1738   const unsigned MaxDebugArgs = 16;
1739   bool Salvaged = false;
1740 
1741   for (auto *DII : DbgUsers) {
1742     // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1743     // are implicitly pointing out the value as a DWARF memory location
1744     // description.
1745     bool StackValue = isa<DbgValueInst>(DII);
1746     auto DIILocation = DII->location_ops();
1747     assert(
1748         is_contained(DIILocation, &I) &&
1749         "DbgVariableIntrinsic must use salvaged instruction as its location");
1750     SmallVector<Value *, 4> AdditionalValues;
1751     // `I` may appear more than once in DII's location ops, and each use of `I`
1752     // must be updated in the DIExpression and potentially have additional
1753     // values added; thus we call salvageDebugInfoImpl for each `I` instance in
1754     // DIILocation.
1755     Value *Op0 = nullptr;
1756     DIExpression *SalvagedExpr = DII->getExpression();
1757     auto LocItr = find(DIILocation, &I);
1758     while (SalvagedExpr && LocItr != DIILocation.end()) {
1759       SmallVector<uint64_t, 16> Ops;
1760       unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
1761       uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
1762       Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
1763       if (!Op0)
1764         break;
1765       SalvagedExpr =
1766           DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
1767       LocItr = std::find(++LocItr, DIILocation.end(), &I);
1768     }
1769     // salvageDebugInfoImpl should fail on examining the first element of
1770     // DbgUsers, or none of them.
1771     if (!Op0)
1772       break;
1773 
1774     DII->replaceVariableLocationOp(&I, Op0);
1775     if (AdditionalValues.empty()) {
1776       DII->setExpression(SalvagedExpr);
1777     } else if (isa<DbgValueInst>(DII) &&
1778                DII->getNumVariableLocationOps() + AdditionalValues.size() <=
1779                    MaxDebugArgs) {
1780       DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
1781     } else {
1782       // Do not salvage using DIArgList for dbg.addr/dbg.declare, as it is
1783       // currently only valid for stack value expressions.
1784       // Also do not salvage if the resulting DIArgList would contain an
1785       // unreasonably large number of values.
1786       Value *Undef = UndefValue::get(I.getOperand(0)->getType());
1787       DII->replaceVariableLocationOp(I.getOperand(0), Undef);
1788     }
1789     LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1790     Salvaged = true;
1791   }
1792 
1793   if (Salvaged)
1794     return;
1795 
1796   for (auto *DII : DbgUsers) {
1797     Value *Undef = UndefValue::get(I.getType());
1798     DII->replaceVariableLocationOp(&I, Undef);
1799   }
1800 }
1801 
1802 Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
1803                            uint64_t CurrentLocOps,
1804                            SmallVectorImpl<uint64_t> &Opcodes,
1805                            SmallVectorImpl<Value *> &AdditionalValues) {
1806   unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
1807   // Rewrite a GEP into a DIExpression.
1808   MapVector<Value *, APInt> VariableOffsets;
1809   APInt ConstantOffset(BitWidth, 0);
1810   if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
1811     return nullptr;
1812   if (!VariableOffsets.empty() && !CurrentLocOps) {
1813     Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
1814     CurrentLocOps = 1;
1815   }
1816   for (auto Offset : VariableOffsets) {
1817     AdditionalValues.push_back(Offset.first);
1818     assert(Offset.second.isStrictlyPositive() &&
1819            "Expected strictly positive multiplier for offset.");
1820     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
1821                     Offset.second.getZExtValue(), dwarf::DW_OP_mul,
1822                     dwarf::DW_OP_plus});
1823   }
1824   DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
1825   return GEP->getOperand(0);
1826 }
1827 
1828 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
1829   switch (Opcode) {
1830   case Instruction::Add:
1831     return dwarf::DW_OP_plus;
1832   case Instruction::Sub:
1833     return dwarf::DW_OP_minus;
1834   case Instruction::Mul:
1835     return dwarf::DW_OP_mul;
1836   case Instruction::SDiv:
1837     return dwarf::DW_OP_div;
1838   case Instruction::SRem:
1839     return dwarf::DW_OP_mod;
1840   case Instruction::Or:
1841     return dwarf::DW_OP_or;
1842   case Instruction::And:
1843     return dwarf::DW_OP_and;
1844   case Instruction::Xor:
1845     return dwarf::DW_OP_xor;
1846   case Instruction::Shl:
1847     return dwarf::DW_OP_shl;
1848   case Instruction::LShr:
1849     return dwarf::DW_OP_shr;
1850   case Instruction::AShr:
1851     return dwarf::DW_OP_shra;
1852   default:
1853     // TODO: Salvage from each kind of binop we know about.
1854     return 0;
1855   }
1856 }
1857 
1858 Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
1859                              SmallVectorImpl<uint64_t> &Opcodes,
1860                              SmallVectorImpl<Value *> &AdditionalValues) {
1861   // Handle binary operations with constant integer operands as a special case.
1862   auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
1863   // Values wider than 64 bits cannot be represented within a DIExpression.
1864   if (ConstInt && ConstInt->getBitWidth() > 64)
1865     return nullptr;
1866 
1867   Instruction::BinaryOps BinOpcode = BI->getOpcode();
1868   // Push any Constant Int operand onto the expression stack.
1869   if (ConstInt) {
1870     uint64_t Val = ConstInt->getSExtValue();
1871     // Add or Sub Instructions with a constant operand can potentially be
1872     // simplified.
1873     if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
1874       uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
1875       DIExpression::appendOffset(Opcodes, Offset);
1876       return BI->getOperand(0);
1877     }
1878     Opcodes.append({dwarf::DW_OP_constu, Val});
1879   } else {
1880     if (!CurrentLocOps) {
1881       Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
1882       CurrentLocOps = 1;
1883     }
1884     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
1885     AdditionalValues.push_back(BI->getOperand(1));
1886   }
1887 
1888   // Add salvaged binary operator to expression stack, if it has a valid
1889   // representation in a DIExpression.
1890   uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
1891   if (!DwarfBinOp)
1892     return nullptr;
1893   Opcodes.push_back(DwarfBinOp);
1894   return BI->getOperand(0);
1895 }
1896 
1897 Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
1898                                   SmallVectorImpl<uint64_t> &Ops,
1899                                   SmallVectorImpl<Value *> &AdditionalValues) {
1900   auto &M = *I.getModule();
1901   auto &DL = M.getDataLayout();
1902 
1903   if (auto *CI = dyn_cast<CastInst>(&I)) {
1904     Value *FromValue = CI->getOperand(0);
1905     // No-op casts are irrelevant for debug info.
1906     if (CI->isNoopCast(DL)) {
1907       return FromValue;
1908     }
1909 
1910     Type *Type = CI->getType();
1911     // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
1912     if (Type->isVectorTy() ||
1913         !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I)))
1914       return nullptr;
1915 
1916     unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits();
1917     unsigned ToTypeBitSize = Type->getScalarSizeInBits();
1918 
1919     auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
1920                                           isa<SExtInst>(&I));
1921     Ops.append(ExtOps.begin(), ExtOps.end());
1922     return FromValue;
1923   }
1924 
1925   if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
1926     return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
1927   else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1928     return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
1929   }
1930   // *Not* to do: we should not attempt to salvage load instructions,
1931   // because the validity and lifetime of a dbg.value containing
1932   // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
1933   return nullptr;
1934 }
1935 
1936 /// A replacement for a dbg.value expression.
1937 using DbgValReplacement = Optional<DIExpression *>;
1938 
1939 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1940 /// possibly moving/undefing users to prevent use-before-def. Returns true if
1941 /// changes are made.
1942 static bool rewriteDebugUsers(
1943     Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1944     function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
1945   // Find debug users of From.
1946   SmallVector<DbgVariableIntrinsic *, 1> Users;
1947   findDbgUsers(Users, &From);
1948   if (Users.empty())
1949     return false;
1950 
1951   // Prevent use-before-def of To.
1952   bool Changed = false;
1953   SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
1954   if (isa<Instruction>(&To)) {
1955     bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1956 
1957     for (auto *DII : Users) {
1958       // It's common to see a debug user between From and DomPoint. Move it
1959       // after DomPoint to preserve the variable update without any reordering.
1960       if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1961         LLVM_DEBUG(dbgs() << "MOVE:  " << *DII << '\n');
1962         DII->moveAfter(&DomPoint);
1963         Changed = true;
1964 
1965       // Users which otherwise aren't dominated by the replacement value must
1966       // be salvaged or deleted.
1967       } else if (!DT.dominates(&DomPoint, DII)) {
1968         UndefOrSalvage.insert(DII);
1969       }
1970     }
1971   }
1972 
1973   // Update debug users without use-before-def risk.
1974   for (auto *DII : Users) {
1975     if (UndefOrSalvage.count(DII))
1976       continue;
1977 
1978     DbgValReplacement DVR = RewriteExpr(*DII);
1979     if (!DVR)
1980       continue;
1981 
1982     DII->replaceVariableLocationOp(&From, &To);
1983     DII->setExpression(*DVR);
1984     LLVM_DEBUG(dbgs() << "REWRITE:  " << *DII << '\n');
1985     Changed = true;
1986   }
1987 
1988   if (!UndefOrSalvage.empty()) {
1989     // Try to salvage the remaining debug users.
1990     salvageDebugInfo(From);
1991     Changed = true;
1992   }
1993 
1994   return Changed;
1995 }
1996 
1997 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
1998 /// losslessly preserve the bits and semantics of the value. This predicate is
1999 /// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
2000 ///
2001 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
2002 /// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2003 /// and also does not allow lossless pointer <-> integer conversions.
2004 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2005                                          Type *ToTy) {
2006   // Trivially compatible types.
2007   if (FromTy == ToTy)
2008     return true;
2009 
2010   // Handle compatible pointer <-> integer conversions.
2011   if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2012     bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2013     bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2014                               !DL.isNonIntegralPointerType(ToTy);
2015     return SameSize && LosslessConversion;
2016   }
2017 
2018   // TODO: This is not exhaustive.
2019   return false;
2020 }
2021 
2022 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2023                                  Instruction &DomPoint, DominatorTree &DT) {
2024   // Exit early if From has no debug users.
2025   if (!From.isUsedByMetadata())
2026     return false;
2027 
2028   assert(&From != &To && "Can't replace something with itself");
2029 
2030   Type *FromTy = From.getType();
2031   Type *ToTy = To.getType();
2032 
2033   auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2034     return DII.getExpression();
2035   };
2036 
2037   // Handle no-op conversions.
2038   Module &M = *From.getModule();
2039   const DataLayout &DL = M.getDataLayout();
2040   if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2041     return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2042 
2043   // Handle integer-to-integer widening and narrowing.
2044   // FIXME: Use DW_OP_convert when it's available everywhere.
2045   if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2046     uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2047     uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2048     assert(FromBits != ToBits && "Unexpected no-op conversion");
2049 
2050     // When the width of the result grows, assume that a debugger will only
2051     // access the low `FromBits` bits when inspecting the source variable.
2052     if (FromBits < ToBits)
2053       return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2054 
2055     // The width of the result has shrunk. Use sign/zero extension to describe
2056     // the source variable's high bits.
2057     auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2058       DILocalVariable *Var = DII.getVariable();
2059 
2060       // Without knowing signedness, sign/zero extension isn't possible.
2061       auto Signedness = Var->getSignedness();
2062       if (!Signedness)
2063         return None;
2064 
2065       bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2066       return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2067                                      Signed);
2068     };
2069     return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
2070   }
2071 
2072   // TODO: Floating-point conversions, vectors.
2073   return false;
2074 }
2075 
2076 std::pair<unsigned, unsigned>
2077 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2078   unsigned NumDeadInst = 0;
2079   unsigned NumDeadDbgInst = 0;
2080   // Delete the instructions backwards, as it has a reduced likelihood of
2081   // having to update as many def-use and use-def chains.
2082   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2083   while (EndInst != &BB->front()) {
2084     // Delete the next to last instruction.
2085     Instruction *Inst = &*--EndInst->getIterator();
2086     if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2087       Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
2088     if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2089       EndInst = Inst;
2090       continue;
2091     }
2092     if (isa<DbgInfoIntrinsic>(Inst))
2093       ++NumDeadDbgInst;
2094     else
2095       ++NumDeadInst;
2096     Inst->eraseFromParent();
2097   }
2098   return {NumDeadInst, NumDeadDbgInst};
2099 }
2100 
2101 unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2102                                    DomTreeUpdater *DTU,
2103                                    MemorySSAUpdater *MSSAU) {
2104   BasicBlock *BB = I->getParent();
2105 
2106   if (MSSAU)
2107     MSSAU->changeToUnreachable(I);
2108 
2109   SmallSet<BasicBlock *, 8> UniqueSuccessors;
2110 
2111   // Loop over all of the successors, removing BB's entry from any PHI
2112   // nodes.
2113   for (BasicBlock *Successor : successors(BB)) {
2114     Successor->removePredecessor(BB, PreserveLCSSA);
2115     if (DTU)
2116       UniqueSuccessors.insert(Successor);
2117   }
2118   auto *UI = new UnreachableInst(I->getContext(), I);
2119   UI->setDebugLoc(I->getDebugLoc());
2120 
2121   // All instructions after this are dead.
2122   unsigned NumInstrsRemoved = 0;
2123   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2124   while (BBI != BBE) {
2125     if (!BBI->use_empty())
2126       BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
2127     BB->getInstList().erase(BBI++);
2128     ++NumInstrsRemoved;
2129   }
2130   if (DTU) {
2131     SmallVector<DominatorTree::UpdateType, 8> Updates;
2132     Updates.reserve(UniqueSuccessors.size());
2133     for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2134       Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2135     DTU->applyUpdates(Updates);
2136   }
2137   return NumInstrsRemoved;
2138 }
2139 
2140 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2141   SmallVector<Value *, 8> Args(II->args());
2142   SmallVector<OperandBundleDef, 1> OpBundles;
2143   II->getOperandBundlesAsDefs(OpBundles);
2144   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2145                                        II->getCalledOperand(), Args, OpBundles);
2146   NewCall->setCallingConv(II->getCallingConv());
2147   NewCall->setAttributes(II->getAttributes());
2148   NewCall->setDebugLoc(II->getDebugLoc());
2149   NewCall->copyMetadata(*II);
2150 
2151   // If the invoke had profile metadata, try converting them for CallInst.
2152   uint64_t TotalWeight;
2153   if (NewCall->extractProfTotalWeight(TotalWeight)) {
2154     // Set the total weight if it fits into i32, otherwise reset.
2155     MDBuilder MDB(NewCall->getContext());
2156     auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2157                           ? nullptr
2158                           : MDB.createBranchWeights({uint32_t(TotalWeight)});
2159     NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2160   }
2161 
2162   return NewCall;
2163 }
2164 
2165 /// changeToCall - Convert the specified invoke into a normal call.
2166 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2167   CallInst *NewCall = createCallMatchingInvoke(II);
2168   NewCall->takeName(II);
2169   NewCall->insertBefore(II);
2170   II->replaceAllUsesWith(NewCall);
2171 
2172   // Follow the call by a branch to the normal destination.
2173   BasicBlock *NormalDestBB = II->getNormalDest();
2174   BranchInst::Create(NormalDestBB, II);
2175 
2176   // Update PHI nodes in the unwind destination
2177   BasicBlock *BB = II->getParent();
2178   BasicBlock *UnwindDestBB = II->getUnwindDest();
2179   UnwindDestBB->removePredecessor(BB);
2180   II->eraseFromParent();
2181   if (DTU)
2182     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2183 }
2184 
2185 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2186                                                    BasicBlock *UnwindEdge,
2187                                                    DomTreeUpdater *DTU) {
2188   BasicBlock *BB = CI->getParent();
2189 
2190   // Convert this function call into an invoke instruction.  First, split the
2191   // basic block.
2192   BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2193                                  CI->getName() + ".noexc");
2194 
2195   // Delete the unconditional branch inserted by SplitBlock
2196   BB->getInstList().pop_back();
2197 
2198   // Create the new invoke instruction.
2199   SmallVector<Value *, 8> InvokeArgs(CI->args());
2200   SmallVector<OperandBundleDef, 1> OpBundles;
2201 
2202   CI->getOperandBundlesAsDefs(OpBundles);
2203 
2204   // Note: we're round tripping operand bundles through memory here, and that
2205   // can potentially be avoided with a cleverer API design that we do not have
2206   // as of this time.
2207 
2208   InvokeInst *II =
2209       InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2210                          UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2211   II->setDebugLoc(CI->getDebugLoc());
2212   II->setCallingConv(CI->getCallingConv());
2213   II->setAttributes(CI->getAttributes());
2214 
2215   if (DTU)
2216     DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2217 
2218   // Make sure that anything using the call now uses the invoke!  This also
2219   // updates the CallGraph if present, because it uses a WeakTrackingVH.
2220   CI->replaceAllUsesWith(II);
2221 
2222   // Delete the original call
2223   Split->getInstList().pop_front();
2224   return Split;
2225 }
2226 
2227 static bool markAliveBlocks(Function &F,
2228                             SmallPtrSetImpl<BasicBlock *> &Reachable,
2229                             DomTreeUpdater *DTU = nullptr) {
2230   SmallVector<BasicBlock*, 128> Worklist;
2231   BasicBlock *BB = &F.front();
2232   Worklist.push_back(BB);
2233   Reachable.insert(BB);
2234   bool Changed = false;
2235   do {
2236     BB = Worklist.pop_back_val();
2237 
2238     // Do a quick scan of the basic block, turning any obviously unreachable
2239     // instructions into LLVM unreachable insts.  The instruction combining pass
2240     // canonicalizes unreachable insts into stores to null or undef.
2241     for (Instruction &I : *BB) {
2242       if (auto *CI = dyn_cast<CallInst>(&I)) {
2243         Value *Callee = CI->getCalledOperand();
2244         // Handle intrinsic calls.
2245         if (Function *F = dyn_cast<Function>(Callee)) {
2246           auto IntrinsicID = F->getIntrinsicID();
2247           // Assumptions that are known to be false are equivalent to
2248           // unreachable. Also, if the condition is undefined, then we make the
2249           // choice most beneficial to the optimizer, and choose that to also be
2250           // unreachable.
2251           if (IntrinsicID == Intrinsic::assume) {
2252             if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2253               // Don't insert a call to llvm.trap right before the unreachable.
2254               changeToUnreachable(CI, false, DTU);
2255               Changed = true;
2256               break;
2257             }
2258           } else if (IntrinsicID == Intrinsic::experimental_guard) {
2259             // A call to the guard intrinsic bails out of the current
2260             // compilation unit if the predicate passed to it is false. If the
2261             // predicate is a constant false, then we know the guard will bail
2262             // out of the current compile unconditionally, so all code following
2263             // it is dead.
2264             //
2265             // Note: unlike in llvm.assume, it is not "obviously profitable" for
2266             // guards to treat `undef` as `false` since a guard on `undef` can
2267             // still be useful for widening.
2268             if (match(CI->getArgOperand(0), m_Zero()))
2269               if (!isa<UnreachableInst>(CI->getNextNode())) {
2270                 changeToUnreachable(CI->getNextNode(), false, DTU);
2271                 Changed = true;
2272                 break;
2273               }
2274           }
2275         } else if ((isa<ConstantPointerNull>(Callee) &&
2276                     !NullPointerIsDefined(CI->getFunction())) ||
2277                    isa<UndefValue>(Callee)) {
2278           changeToUnreachable(CI, false, DTU);
2279           Changed = true;
2280           break;
2281         }
2282         if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2283           // If we found a call to a no-return function, insert an unreachable
2284           // instruction after it.  Make sure there isn't *already* one there
2285           // though.
2286           if (!isa<UnreachableInst>(CI->getNextNode())) {
2287             // Don't insert a call to llvm.trap right before the unreachable.
2288             changeToUnreachable(CI->getNextNode(), false, DTU);
2289             Changed = true;
2290           }
2291           break;
2292         }
2293       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2294         // Store to undef and store to null are undefined and used to signal
2295         // that they should be changed to unreachable by passes that can't
2296         // modify the CFG.
2297 
2298         // Don't touch volatile stores.
2299         if (SI->isVolatile()) continue;
2300 
2301         Value *Ptr = SI->getOperand(1);
2302 
2303         if (isa<UndefValue>(Ptr) ||
2304             (isa<ConstantPointerNull>(Ptr) &&
2305              !NullPointerIsDefined(SI->getFunction(),
2306                                    SI->getPointerAddressSpace()))) {
2307           changeToUnreachable(SI, false, DTU);
2308           Changed = true;
2309           break;
2310         }
2311       }
2312     }
2313 
2314     Instruction *Terminator = BB->getTerminator();
2315     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2316       // Turn invokes that call 'nounwind' functions into ordinary calls.
2317       Value *Callee = II->getCalledOperand();
2318       if ((isa<ConstantPointerNull>(Callee) &&
2319            !NullPointerIsDefined(BB->getParent())) ||
2320           isa<UndefValue>(Callee)) {
2321         changeToUnreachable(II, false, DTU);
2322         Changed = true;
2323       } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2324         if (II->use_empty() && II->onlyReadsMemory()) {
2325           // jump to the normal destination branch.
2326           BasicBlock *NormalDestBB = II->getNormalDest();
2327           BasicBlock *UnwindDestBB = II->getUnwindDest();
2328           BranchInst::Create(NormalDestBB, II);
2329           UnwindDestBB->removePredecessor(II->getParent());
2330           II->eraseFromParent();
2331           if (DTU)
2332             DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2333         } else
2334           changeToCall(II, DTU);
2335         Changed = true;
2336       }
2337     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2338       // Remove catchpads which cannot be reached.
2339       struct CatchPadDenseMapInfo {
2340         static CatchPadInst *getEmptyKey() {
2341           return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2342         }
2343 
2344         static CatchPadInst *getTombstoneKey() {
2345           return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2346         }
2347 
2348         static unsigned getHashValue(CatchPadInst *CatchPad) {
2349           return static_cast<unsigned>(hash_combine_range(
2350               CatchPad->value_op_begin(), CatchPad->value_op_end()));
2351         }
2352 
2353         static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2354           if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2355               RHS == getEmptyKey() || RHS == getTombstoneKey())
2356             return LHS == RHS;
2357           return LHS->isIdenticalTo(RHS);
2358         }
2359       };
2360 
2361       SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2362       // Set of unique CatchPads.
2363       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2364                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2365           HandlerSet;
2366       detail::DenseSetEmpty Empty;
2367       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2368                                              E = CatchSwitch->handler_end();
2369            I != E; ++I) {
2370         BasicBlock *HandlerBB = *I;
2371         if (DTU)
2372           ++NumPerSuccessorCases[HandlerBB];
2373         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2374         if (!HandlerSet.insert({CatchPad, Empty}).second) {
2375           if (DTU)
2376             --NumPerSuccessorCases[HandlerBB];
2377           CatchSwitch->removeHandler(I);
2378           --I;
2379           --E;
2380           Changed = true;
2381         }
2382       }
2383       if (DTU) {
2384         std::vector<DominatorTree::UpdateType> Updates;
2385         for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2386           if (I.second == 0)
2387             Updates.push_back({DominatorTree::Delete, BB, I.first});
2388         DTU->applyUpdates(Updates);
2389       }
2390     }
2391 
2392     Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2393     for (BasicBlock *Successor : successors(BB))
2394       if (Reachable.insert(Successor).second)
2395         Worklist.push_back(Successor);
2396   } while (!Worklist.empty());
2397   return Changed;
2398 }
2399 
2400 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2401   Instruction *TI = BB->getTerminator();
2402 
2403   if (auto *II = dyn_cast<InvokeInst>(TI)) {
2404     changeToCall(II, DTU);
2405     return;
2406   }
2407 
2408   Instruction *NewTI;
2409   BasicBlock *UnwindDest;
2410 
2411   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2412     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2413     UnwindDest = CRI->getUnwindDest();
2414   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2415     auto *NewCatchSwitch = CatchSwitchInst::Create(
2416         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2417         CatchSwitch->getName(), CatchSwitch);
2418     for (BasicBlock *PadBB : CatchSwitch->handlers())
2419       NewCatchSwitch->addHandler(PadBB);
2420 
2421     NewTI = NewCatchSwitch;
2422     UnwindDest = CatchSwitch->getUnwindDest();
2423   } else {
2424     llvm_unreachable("Could not find unwind successor");
2425   }
2426 
2427   NewTI->takeName(TI);
2428   NewTI->setDebugLoc(TI->getDebugLoc());
2429   UnwindDest->removePredecessor(BB);
2430   TI->replaceAllUsesWith(NewTI);
2431   TI->eraseFromParent();
2432   if (DTU)
2433     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2434 }
2435 
2436 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
2437 /// if they are in a dead cycle.  Return true if a change was made, false
2438 /// otherwise.
2439 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2440                                    MemorySSAUpdater *MSSAU) {
2441   SmallPtrSet<BasicBlock *, 16> Reachable;
2442   bool Changed = markAliveBlocks(F, Reachable, DTU);
2443 
2444   // If there are unreachable blocks in the CFG...
2445   if (Reachable.size() == F.size())
2446     return Changed;
2447 
2448   assert(Reachable.size() < F.size());
2449 
2450   // Are there any blocks left to actually delete?
2451   SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2452   for (BasicBlock &BB : F) {
2453     // Skip reachable basic blocks
2454     if (Reachable.count(&BB))
2455       continue;
2456     // Skip already-deleted blocks
2457     if (DTU && DTU->isBBPendingDeletion(&BB))
2458       continue;
2459     BlocksToRemove.insert(&BB);
2460   }
2461 
2462   if (BlocksToRemove.empty())
2463     return Changed;
2464 
2465   Changed = true;
2466   NumRemoved += BlocksToRemove.size();
2467 
2468   if (MSSAU)
2469     MSSAU->removeBlocks(BlocksToRemove);
2470 
2471   DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
2472 
2473   return Changed;
2474 }
2475 
2476 void llvm::combineMetadata(Instruction *K, const Instruction *J,
2477                            ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
2478   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2479   K->dropUnknownNonDebugMetadata(KnownIDs);
2480   K->getAllMetadataOtherThanDebugLoc(Metadata);
2481   for (const auto &MD : Metadata) {
2482     unsigned Kind = MD.first;
2483     MDNode *JMD = J->getMetadata(Kind);
2484     MDNode *KMD = MD.second;
2485 
2486     switch (Kind) {
2487       default:
2488         K->setMetadata(Kind, nullptr); // Remove unknown metadata
2489         break;
2490       case LLVMContext::MD_dbg:
2491         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2492       case LLVMContext::MD_tbaa:
2493         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2494         break;
2495       case LLVMContext::MD_alias_scope:
2496         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2497         break;
2498       case LLVMContext::MD_noalias:
2499       case LLVMContext::MD_mem_parallel_loop_access:
2500         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2501         break;
2502       case LLVMContext::MD_access_group:
2503         K->setMetadata(LLVMContext::MD_access_group,
2504                        intersectAccessGroups(K, J));
2505         break;
2506       case LLVMContext::MD_range:
2507 
2508         // If K does move, use most generic range. Otherwise keep the range of
2509         // K.
2510         if (DoesKMove)
2511           // FIXME: If K does move, we should drop the range info and nonnull.
2512           //        Currently this function is used with DoesKMove in passes
2513           //        doing hoisting/sinking and the current behavior of using the
2514           //        most generic range is correct in those cases.
2515           K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2516         break;
2517       case LLVMContext::MD_fpmath:
2518         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2519         break;
2520       case LLVMContext::MD_invariant_load:
2521         // Only set the !invariant.load if it is present in both instructions.
2522         K->setMetadata(Kind, JMD);
2523         break;
2524       case LLVMContext::MD_nonnull:
2525         // If K does move, keep nonull if it is present in both instructions.
2526         if (DoesKMove)
2527           K->setMetadata(Kind, JMD);
2528         break;
2529       case LLVMContext::MD_invariant_group:
2530         // Preserve !invariant.group in K.
2531         break;
2532       case LLVMContext::MD_align:
2533         K->setMetadata(Kind,
2534           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2535         break;
2536       case LLVMContext::MD_dereferenceable:
2537       case LLVMContext::MD_dereferenceable_or_null:
2538         K->setMetadata(Kind,
2539           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2540         break;
2541       case LLVMContext::MD_preserve_access_index:
2542         // Preserve !preserve.access.index in K.
2543         break;
2544     }
2545   }
2546   // Set !invariant.group from J if J has it. If both instructions have it
2547   // then we will just pick it from J - even when they are different.
2548   // Also make sure that K is load or store - f.e. combining bitcast with load
2549   // could produce bitcast with invariant.group metadata, which is invalid.
2550   // FIXME: we should try to preserve both invariant.group md if they are
2551   // different, but right now instruction can only have one invariant.group.
2552   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2553     if (isa<LoadInst>(K) || isa<StoreInst>(K))
2554       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2555 }
2556 
2557 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2558                                  bool KDominatesJ) {
2559   unsigned KnownIDs[] = {
2560       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2561       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2562       LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
2563       LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2564       LLVMContext::MD_dereferenceable,
2565       LLVMContext::MD_dereferenceable_or_null,
2566       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2567   combineMetadata(K, J, KnownIDs, KDominatesJ);
2568 }
2569 
2570 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2571   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2572   Source.getAllMetadata(MD);
2573   MDBuilder MDB(Dest.getContext());
2574   Type *NewType = Dest.getType();
2575   const DataLayout &DL = Source.getModule()->getDataLayout();
2576   for (const auto &MDPair : MD) {
2577     unsigned ID = MDPair.first;
2578     MDNode *N = MDPair.second;
2579     // Note, essentially every kind of metadata should be preserved here! This
2580     // routine is supposed to clone a load instruction changing *only its type*.
2581     // The only metadata it makes sense to drop is metadata which is invalidated
2582     // when the pointer type changes. This should essentially never be the case
2583     // in LLVM, but we explicitly switch over only known metadata to be
2584     // conservatively correct. If you are adding metadata to LLVM which pertains
2585     // to loads, you almost certainly want to add it here.
2586     switch (ID) {
2587     case LLVMContext::MD_dbg:
2588     case LLVMContext::MD_tbaa:
2589     case LLVMContext::MD_prof:
2590     case LLVMContext::MD_fpmath:
2591     case LLVMContext::MD_tbaa_struct:
2592     case LLVMContext::MD_invariant_load:
2593     case LLVMContext::MD_alias_scope:
2594     case LLVMContext::MD_noalias:
2595     case LLVMContext::MD_nontemporal:
2596     case LLVMContext::MD_mem_parallel_loop_access:
2597     case LLVMContext::MD_access_group:
2598       // All of these directly apply.
2599       Dest.setMetadata(ID, N);
2600       break;
2601 
2602     case LLVMContext::MD_nonnull:
2603       copyNonnullMetadata(Source, N, Dest);
2604       break;
2605 
2606     case LLVMContext::MD_align:
2607     case LLVMContext::MD_dereferenceable:
2608     case LLVMContext::MD_dereferenceable_or_null:
2609       // These only directly apply if the new type is also a pointer.
2610       if (NewType->isPointerTy())
2611         Dest.setMetadata(ID, N);
2612       break;
2613 
2614     case LLVMContext::MD_range:
2615       copyRangeMetadata(DL, Source, N, Dest);
2616       break;
2617     }
2618   }
2619 }
2620 
2621 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2622   auto *ReplInst = dyn_cast<Instruction>(Repl);
2623   if (!ReplInst)
2624     return;
2625 
2626   // Patch the replacement so that it is not more restrictive than the value
2627   // being replaced.
2628   // Note that if 'I' is a load being replaced by some operation,
2629   // for example, by an arithmetic operation, then andIRFlags()
2630   // would just erase all math flags from the original arithmetic
2631   // operation, which is clearly not wanted and not needed.
2632   if (!isa<LoadInst>(I))
2633     ReplInst->andIRFlags(I);
2634 
2635   // FIXME: If both the original and replacement value are part of the
2636   // same control-flow region (meaning that the execution of one
2637   // guarantees the execution of the other), then we can combine the
2638   // noalias scopes here and do better than the general conservative
2639   // answer used in combineMetadata().
2640 
2641   // In general, GVN unifies expressions over different control-flow
2642   // regions, and so we need a conservative combination of the noalias
2643   // scopes.
2644   static const unsigned KnownIDs[] = {
2645       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2646       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2647       LLVMContext::MD_fpmath,          LLVMContext::MD_invariant_load,
2648       LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
2649       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2650   combineMetadata(ReplInst, I, KnownIDs, false);
2651 }
2652 
2653 template <typename RootType, typename DominatesFn>
2654 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2655                                          const RootType &Root,
2656                                          const DominatesFn &Dominates) {
2657   assert(From->getType() == To->getType());
2658 
2659   unsigned Count = 0;
2660   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2661        UI != UE;) {
2662     Use &U = *UI++;
2663     if (!Dominates(Root, U))
2664       continue;
2665     U.set(To);
2666     LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
2667                       << "' as " << *To << " in " << *U << "\n");
2668     ++Count;
2669   }
2670   return Count;
2671 }
2672 
2673 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
2674    assert(From->getType() == To->getType());
2675    auto *BB = From->getParent();
2676    unsigned Count = 0;
2677 
2678   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2679        UI != UE;) {
2680     Use &U = *UI++;
2681     auto *I = cast<Instruction>(U.getUser());
2682     if (I->getParent() == BB)
2683       continue;
2684     U.set(To);
2685     ++Count;
2686   }
2687   return Count;
2688 }
2689 
2690 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2691                                         DominatorTree &DT,
2692                                         const BasicBlockEdge &Root) {
2693   auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2694     return DT.dominates(Root, U);
2695   };
2696   return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2697 }
2698 
2699 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2700                                         DominatorTree &DT,
2701                                         const BasicBlock *BB) {
2702   auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
2703     return DT.dominates(BB, U);
2704   };
2705   return ::replaceDominatedUsesWith(From, To, BB, Dominates);
2706 }
2707 
2708 bool llvm::callsGCLeafFunction(const CallBase *Call,
2709                                const TargetLibraryInfo &TLI) {
2710   // Check if the function is specifically marked as a gc leaf function.
2711   if (Call->hasFnAttr("gc-leaf-function"))
2712     return true;
2713   if (const Function *F = Call->getCalledFunction()) {
2714     if (F->hasFnAttribute("gc-leaf-function"))
2715       return true;
2716 
2717     if (auto IID = F->getIntrinsicID()) {
2718       // Most LLVM intrinsics do not take safepoints.
2719       return IID != Intrinsic::experimental_gc_statepoint &&
2720              IID != Intrinsic::experimental_deoptimize &&
2721              IID != Intrinsic::memcpy_element_unordered_atomic &&
2722              IID != Intrinsic::memmove_element_unordered_atomic;
2723     }
2724   }
2725 
2726   // Lib calls can be materialized by some passes, and won't be
2727   // marked as 'gc-leaf-function.' All available Libcalls are
2728   // GC-leaf.
2729   LibFunc LF;
2730   if (TLI.getLibFunc(*Call, LF)) {
2731     return TLI.has(LF);
2732   }
2733 
2734   return false;
2735 }
2736 
2737 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2738                                LoadInst &NewLI) {
2739   auto *NewTy = NewLI.getType();
2740 
2741   // This only directly applies if the new type is also a pointer.
2742   if (NewTy->isPointerTy()) {
2743     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2744     return;
2745   }
2746 
2747   // The only other translation we can do is to integral loads with !range
2748   // metadata.
2749   if (!NewTy->isIntegerTy())
2750     return;
2751 
2752   MDBuilder MDB(NewLI.getContext());
2753   const Value *Ptr = OldLI.getPointerOperand();
2754   auto *ITy = cast<IntegerType>(NewTy);
2755   auto *NullInt = ConstantExpr::getPtrToInt(
2756       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2757   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2758   NewLI.setMetadata(LLVMContext::MD_range,
2759                     MDB.createRange(NonNullInt, NullInt));
2760 }
2761 
2762 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2763                              MDNode *N, LoadInst &NewLI) {
2764   auto *NewTy = NewLI.getType();
2765 
2766   // Give up unless it is converted to a pointer where there is a single very
2767   // valuable mapping we can do reliably.
2768   // FIXME: It would be nice to propagate this in more ways, but the type
2769   // conversions make it hard.
2770   if (!NewTy->isPointerTy())
2771     return;
2772 
2773   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
2774   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2775     MDNode *NN = MDNode::get(OldLI.getContext(), None);
2776     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2777   }
2778 }
2779 
2780 void llvm::dropDebugUsers(Instruction &I) {
2781   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2782   findDbgUsers(DbgUsers, &I);
2783   for (auto *DII : DbgUsers)
2784     DII->eraseFromParent();
2785 }
2786 
2787 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2788                                     BasicBlock *BB) {
2789   // Since we are moving the instructions out of its basic block, we do not
2790   // retain their original debug locations (DILocations) and debug intrinsic
2791   // instructions.
2792   //
2793   // Doing so would degrade the debugging experience and adversely affect the
2794   // accuracy of profiling information.
2795   //
2796   // Currently, when hoisting the instructions, we take the following actions:
2797   // - Remove their debug intrinsic instructions.
2798   // - Set their debug locations to the values from the insertion point.
2799   //
2800   // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2801   // need to be deleted, is because there will not be any instructions with a
2802   // DILocation in either branch left after performing the transformation. We
2803   // can only insert a dbg.value after the two branches are joined again.
2804   //
2805   // See PR38762, PR39243 for more details.
2806   //
2807   // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2808   // encode predicated DIExpressions that yield different results on different
2809   // code paths.
2810 
2811   for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2812     Instruction *I = &*II;
2813     I->dropUndefImplyingAttrsAndUnknownMetadata();
2814     if (I->isUsedByMetadata())
2815       dropDebugUsers(*I);
2816     if (I->isDebugOrPseudoInst()) {
2817       // Remove DbgInfo and pseudo probe Intrinsics.
2818       II = I->eraseFromParent();
2819       continue;
2820     }
2821     I->setDebugLoc(InsertPt->getDebugLoc());
2822     ++II;
2823   }
2824   DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2825                                  BB->begin(),
2826                                  BB->getTerminator()->getIterator());
2827 }
2828 
2829 namespace {
2830 
2831 /// A potential constituent of a bitreverse or bswap expression. See
2832 /// collectBitParts for a fuller explanation.
2833 struct BitPart {
2834   BitPart(Value *P, unsigned BW) : Provider(P) {
2835     Provenance.resize(BW);
2836   }
2837 
2838   /// The Value that this is a bitreverse/bswap of.
2839   Value *Provider;
2840 
2841   /// The "provenance" of each bit. Provenance[A] = B means that bit A
2842   /// in Provider becomes bit B in the result of this expression.
2843   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2844 
2845   enum { Unset = -1 };
2846 };
2847 
2848 } // end anonymous namespace
2849 
2850 /// Analyze the specified subexpression and see if it is capable of providing
2851 /// pieces of a bswap or bitreverse. The subexpression provides a potential
2852 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
2853 /// the output of the expression came from a corresponding bit in some other
2854 /// value. This function is recursive, and the end result is a mapping of
2855 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
2856 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2857 ///
2858 /// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
2859 /// that the expression deposits the low byte of %X into the high byte of the
2860 /// result and that all other bits are zero. This expression is accepted and a
2861 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2862 /// [0-7].
2863 ///
2864 /// For vector types, all analysis is performed at the per-element level. No
2865 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
2866 /// constant masks must be splatted across all elements.
2867 ///
2868 /// To avoid revisiting values, the BitPart results are memoized into the
2869 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
2870 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
2871 /// store BitParts objects, not pointers. As we need the concept of a nullptr
2872 /// BitParts (Value has been analyzed and the analysis failed), we an Optional
2873 /// type instead to provide the same functionality.
2874 ///
2875 /// Because we pass around references into \c BPS, we must use a container that
2876 /// does not invalidate internal references (std::map instead of DenseMap).
2877 static const Optional<BitPart> &
2878 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2879                 std::map<Value *, Optional<BitPart>> &BPS, int Depth,
2880                 bool &FoundRoot) {
2881   auto I = BPS.find(V);
2882   if (I != BPS.end())
2883     return I->second;
2884 
2885   auto &Result = BPS[V] = None;
2886   auto BitWidth = V->getType()->getScalarSizeInBits();
2887 
2888   // Can't do integer/elements > 128 bits.
2889   if (BitWidth > 128)
2890     return Result;
2891 
2892   // Prevent stack overflow by limiting the recursion depth
2893   if (Depth == BitPartRecursionMaxDepth) {
2894     LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
2895     return Result;
2896   }
2897 
2898   if (auto *I = dyn_cast<Instruction>(V)) {
2899     Value *X, *Y;
2900     const APInt *C;
2901 
2902     // If this is an or instruction, it may be an inner node of the bswap.
2903     if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
2904       // Check we have both sources and they are from the same provider.
2905       const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2906                                       Depth + 1, FoundRoot);
2907       if (!A || !A->Provider)
2908         return Result;
2909 
2910       const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
2911                                       Depth + 1, FoundRoot);
2912       if (!B || A->Provider != B->Provider)
2913         return Result;
2914 
2915       // Try and merge the two together.
2916       Result = BitPart(A->Provider, BitWidth);
2917       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
2918         if (A->Provenance[BitIdx] != BitPart::Unset &&
2919             B->Provenance[BitIdx] != BitPart::Unset &&
2920             A->Provenance[BitIdx] != B->Provenance[BitIdx])
2921           return Result = None;
2922 
2923         if (A->Provenance[BitIdx] == BitPart::Unset)
2924           Result->Provenance[BitIdx] = B->Provenance[BitIdx];
2925         else
2926           Result->Provenance[BitIdx] = A->Provenance[BitIdx];
2927       }
2928 
2929       return Result;
2930     }
2931 
2932     // If this is a logical shift by a constant, recurse then shift the result.
2933     if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
2934       const APInt &BitShift = *C;
2935 
2936       // Ensure the shift amount is defined.
2937       if (BitShift.uge(BitWidth))
2938         return Result;
2939 
2940       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
2941       if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
2942         return Result;
2943 
2944       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2945                                         Depth + 1, FoundRoot);
2946       if (!Res)
2947         return Result;
2948       Result = Res;
2949 
2950       // Perform the "shift" on BitProvenance.
2951       auto &P = Result->Provenance;
2952       if (I->getOpcode() == Instruction::Shl) {
2953         P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
2954         P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
2955       } else {
2956         P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
2957         P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
2958       }
2959 
2960       return Result;
2961     }
2962 
2963     // If this is a logical 'and' with a mask that clears bits, recurse then
2964     // unset the appropriate bits.
2965     if (match(V, m_And(m_Value(X), m_APInt(C)))) {
2966       const APInt &AndMask = *C;
2967 
2968       // Check that the mask allows a multiple of 8 bits for a bswap, for an
2969       // early exit.
2970       unsigned NumMaskedBits = AndMask.countPopulation();
2971       if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
2972         return Result;
2973 
2974       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2975                                         Depth + 1, FoundRoot);
2976       if (!Res)
2977         return Result;
2978       Result = Res;
2979 
2980       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
2981         // If the AndMask is zero for this bit, clear the bit.
2982         if (AndMask[BitIdx] == 0)
2983           Result->Provenance[BitIdx] = BitPart::Unset;
2984       return Result;
2985     }
2986 
2987     // If this is a zext instruction zero extend the result.
2988     if (match(V, m_ZExt(m_Value(X)))) {
2989       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
2990                                         Depth + 1, FoundRoot);
2991       if (!Res)
2992         return Result;
2993 
2994       Result = BitPart(Res->Provider, BitWidth);
2995       auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
2996       for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
2997         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
2998       for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
2999         Result->Provenance[BitIdx] = BitPart::Unset;
3000       return Result;
3001     }
3002 
3003     // If this is a truncate instruction, extract the lower bits.
3004     if (match(V, m_Trunc(m_Value(X)))) {
3005       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3006                                         Depth + 1, FoundRoot);
3007       if (!Res)
3008         return Result;
3009 
3010       Result = BitPart(Res->Provider, BitWidth);
3011       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3012         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3013       return Result;
3014     }
3015 
3016     // BITREVERSE - most likely due to us previous matching a partial
3017     // bitreverse.
3018     if (match(V, m_BitReverse(m_Value(X)))) {
3019       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3020                                         Depth + 1, FoundRoot);
3021       if (!Res)
3022         return Result;
3023 
3024       Result = BitPart(Res->Provider, BitWidth);
3025       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3026         Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3027       return Result;
3028     }
3029 
3030     // BSWAP - most likely due to us previous matching a partial bswap.
3031     if (match(V, m_BSwap(m_Value(X)))) {
3032       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3033                                         Depth + 1, FoundRoot);
3034       if (!Res)
3035         return Result;
3036 
3037       unsigned ByteWidth = BitWidth / 8;
3038       Result = BitPart(Res->Provider, BitWidth);
3039       for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3040         unsigned ByteBitOfs = ByteIdx * 8;
3041         for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3042           Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3043               Res->Provenance[ByteBitOfs + BitIdx];
3044       }
3045       return Result;
3046     }
3047 
3048     // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3049     // amount (modulo).
3050     // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3051     // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3052     if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3053         match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3054       // We can treat fshr as a fshl by flipping the modulo amount.
3055       unsigned ModAmt = C->urem(BitWidth);
3056       if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3057         ModAmt = BitWidth - ModAmt;
3058 
3059       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3060       if (!MatchBitReversals && (ModAmt % 8) != 0)
3061         return Result;
3062 
3063       // Check we have both sources and they are from the same provider.
3064       const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3065                                         Depth + 1, FoundRoot);
3066       if (!LHS || !LHS->Provider)
3067         return Result;
3068 
3069       const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3070                                         Depth + 1, FoundRoot);
3071       if (!RHS || LHS->Provider != RHS->Provider)
3072         return Result;
3073 
3074       unsigned StartBitRHS = BitWidth - ModAmt;
3075       Result = BitPart(LHS->Provider, BitWidth);
3076       for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3077         Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3078       for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3079         Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3080       return Result;
3081     }
3082   }
3083 
3084   // If we've already found a root input value then we're never going to merge
3085   // these back together.
3086   if (FoundRoot)
3087     return Result;
3088 
3089   // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3090   // be the root input value to the bswap/bitreverse.
3091   FoundRoot = true;
3092   Result = BitPart(V, BitWidth);
3093   for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3094     Result->Provenance[BitIdx] = BitIdx;
3095   return Result;
3096 }
3097 
3098 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3099                                           unsigned BitWidth) {
3100   if (From % 8 != To % 8)
3101     return false;
3102   // Convert from bit indices to byte indices and check for a byte reversal.
3103   From >>= 3;
3104   To >>= 3;
3105   BitWidth >>= 3;
3106   return From == BitWidth - To - 1;
3107 }
3108 
3109 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3110                                                unsigned BitWidth) {
3111   return From == BitWidth - To - 1;
3112 }
3113 
3114 bool llvm::recognizeBSwapOrBitReverseIdiom(
3115     Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3116     SmallVectorImpl<Instruction *> &InsertedInsts) {
3117   if (!match(I, m_Or(m_Value(), m_Value())) &&
3118       !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3119       !match(I, m_FShr(m_Value(), m_Value(), m_Value())))
3120     return false;
3121   if (!MatchBSwaps && !MatchBitReversals)
3122     return false;
3123   Type *ITy = I->getType();
3124   if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3125     return false;  // Can't do integer/elements > 128 bits.
3126 
3127   Type *DemandedTy = ITy;
3128   if (I->hasOneUse())
3129     if (auto *Trunc = dyn_cast<TruncInst>(I->user_back()))
3130       DemandedTy = Trunc->getType();
3131 
3132   // Try to find all the pieces corresponding to the bswap.
3133   bool FoundRoot = false;
3134   std::map<Value *, Optional<BitPart>> BPS;
3135   const auto &Res =
3136       collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3137   if (!Res)
3138     return false;
3139   ArrayRef<int8_t> BitProvenance = Res->Provenance;
3140   assert(all_of(BitProvenance,
3141                 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3142          "Illegal bit provenance index");
3143 
3144   // If the upper bits are zero, then attempt to perform as a truncated op.
3145   if (BitProvenance.back() == BitPart::Unset) {
3146     while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3147       BitProvenance = BitProvenance.drop_back();
3148     if (BitProvenance.empty())
3149       return false; // TODO - handle null value?
3150     DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3151     if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3152       DemandedTy = VectorType::get(DemandedTy, IVecTy);
3153   }
3154 
3155   // Check BitProvenance hasn't found a source larger than the result type.
3156   unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3157   if (DemandedBW > ITy->getScalarSizeInBits())
3158     return false;
3159 
3160   // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3161   // only byteswap values with an even number of bytes.
3162   APInt DemandedMask = APInt::getAllOnesValue(DemandedBW);
3163   bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3164   bool OKForBitReverse = MatchBitReversals;
3165   for (unsigned BitIdx = 0;
3166        (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3167     if (BitProvenance[BitIdx] == BitPart::Unset) {
3168       DemandedMask.clearBit(BitIdx);
3169       continue;
3170     }
3171     OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3172                                                 DemandedBW);
3173     OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3174                                                           BitIdx, DemandedBW);
3175   }
3176 
3177   Intrinsic::ID Intrin;
3178   if (OKForBSwap)
3179     Intrin = Intrinsic::bswap;
3180   else if (OKForBitReverse)
3181     Intrin = Intrinsic::bitreverse;
3182   else
3183     return false;
3184 
3185   Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
3186   Value *Provider = Res->Provider;
3187 
3188   // We may need to truncate the provider.
3189   if (DemandedTy != Provider->getType()) {
3190     auto *Trunc =
3191         CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
3192     InsertedInsts.push_back(Trunc);
3193     Provider = Trunc;
3194   }
3195 
3196   Instruction *Result = CallInst::Create(F, Provider, "rev", I);
3197   InsertedInsts.push_back(Result);
3198 
3199   if (!DemandedMask.isAllOnesValue()) {
3200     auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3201     Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
3202     InsertedInsts.push_back(Result);
3203   }
3204 
3205   // We may need to zeroextend back to the result type.
3206   if (ITy != Result->getType()) {
3207     auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
3208     InsertedInsts.push_back(ExtInst);
3209   }
3210 
3211   return true;
3212 }
3213 
3214 // CodeGen has special handling for some string functions that may replace
3215 // them with target-specific intrinsics.  Since that'd skip our interceptors
3216 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3217 // we mark affected calls as NoBuiltin, which will disable optimization
3218 // in CodeGen.
3219 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3220     CallInst *CI, const TargetLibraryInfo *TLI) {
3221   Function *F = CI->getCalledFunction();
3222   LibFunc Func;
3223   if (F && !F->hasLocalLinkage() && F->hasName() &&
3224       TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3225       !F->doesNotAccessMemory())
3226     CI->addFnAttr(Attribute::NoBuiltin);
3227 }
3228 
3229 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
3230   // We can't have a PHI with a metadata type.
3231   if (I->getOperand(OpIdx)->getType()->isMetadataTy())
3232     return false;
3233 
3234   // Early exit.
3235   if (!isa<Constant>(I->getOperand(OpIdx)))
3236     return true;
3237 
3238   switch (I->getOpcode()) {
3239   default:
3240     return true;
3241   case Instruction::Call:
3242   case Instruction::Invoke: {
3243     const auto &CB = cast<CallBase>(*I);
3244 
3245     // Can't handle inline asm. Skip it.
3246     if (CB.isInlineAsm())
3247       return false;
3248 
3249     // Constant bundle operands may need to retain their constant-ness for
3250     // correctness.
3251     if (CB.isBundleOperand(OpIdx))
3252       return false;
3253 
3254     if (OpIdx < CB.getNumArgOperands()) {
3255       // Some variadic intrinsics require constants in the variadic arguments,
3256       // which currently aren't markable as immarg.
3257       if (isa<IntrinsicInst>(CB) &&
3258           OpIdx >= CB.getFunctionType()->getNumParams()) {
3259         // This is known to be OK for stackmap.
3260         return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3261       }
3262 
3263       // gcroot is a special case, since it requires a constant argument which
3264       // isn't also required to be a simple ConstantInt.
3265       if (CB.getIntrinsicID() == Intrinsic::gcroot)
3266         return false;
3267 
3268       // Some intrinsic operands are required to be immediates.
3269       return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3270     }
3271 
3272     // It is never allowed to replace the call argument to an intrinsic, but it
3273     // may be possible for a call.
3274     return !isa<IntrinsicInst>(CB);
3275   }
3276   case Instruction::ShuffleVector:
3277     // Shufflevector masks are constant.
3278     return OpIdx != 2;
3279   case Instruction::Switch:
3280   case Instruction::ExtractValue:
3281     // All operands apart from the first are constant.
3282     return OpIdx == 0;
3283   case Instruction::InsertValue:
3284     // All operands apart from the first and the second are constant.
3285     return OpIdx < 2;
3286   case Instruction::Alloca:
3287     // Static allocas (constant size in the entry block) are handled by
3288     // prologue/epilogue insertion so they're free anyway. We definitely don't
3289     // want to make them non-constant.
3290     return !cast<AllocaInst>(I)->isStaticAlloca();
3291   case Instruction::GetElementPtr:
3292     if (OpIdx == 0)
3293       return true;
3294     gep_type_iterator It = gep_type_begin(I);
3295     for (auto E = std::next(It, OpIdx); It != E; ++It)
3296       if (It.isStruct())
3297         return false;
3298     return true;
3299   }
3300 }
3301 
3302 Value *llvm::invertCondition(Value *Condition) {
3303   // First: Check if it's a constant
3304   if (Constant *C = dyn_cast<Constant>(Condition))
3305     return ConstantExpr::getNot(C);
3306 
3307   // Second: If the condition is already inverted, return the original value
3308   Value *NotCondition;
3309   if (match(Condition, m_Not(m_Value(NotCondition))))
3310     return NotCondition;
3311 
3312   BasicBlock *Parent = nullptr;
3313   Instruction *Inst = dyn_cast<Instruction>(Condition);
3314   if (Inst)
3315     Parent = Inst->getParent();
3316   else if (Argument *Arg = dyn_cast<Argument>(Condition))
3317     Parent = &Arg->getParent()->getEntryBlock();
3318   assert(Parent && "Unsupported condition to invert");
3319 
3320   // Third: Check all the users for an invert
3321   for (User *U : Condition->users())
3322     if (Instruction *I = dyn_cast<Instruction>(U))
3323       if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
3324         return I;
3325 
3326   // Last option: Create a new instruction
3327   auto *Inverted =
3328       BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
3329   if (Inst && !isa<PHINode>(Inst))
3330     Inverted->insertAfter(Inst);
3331   else
3332     Inverted->insertBefore(&*Parent->getFirstInsertionPt());
3333   return Inverted;
3334 }
3335 
3336 bool llvm::inferAttributesFromOthers(Function &F) {
3337   // Note: We explicitly check for attributes rather than using cover functions
3338   // because some of the cover functions include the logic being implemented.
3339 
3340   bool Changed = false;
3341   // readnone + not convergent implies nosync
3342   if (!F.hasFnAttribute(Attribute::NoSync) &&
3343       F.doesNotAccessMemory() && !F.isConvergent()) {
3344     F.setNoSync();
3345     Changed = true;
3346   }
3347 
3348   // readonly implies nofree
3349   if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
3350     F.setDoesNotFreeMemory();
3351     Changed = true;
3352   }
3353 
3354   // willreturn implies mustprogress
3355   if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
3356     F.setMustProgress();
3357     Changed = true;
3358   }
3359 
3360   // TODO: There are a bunch of cases of restrictive memory effects we
3361   // can infer by inspecting arguments of argmemonly-ish functions.
3362 
3363   return Changed;
3364 }
3365