1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This family of functions performs various local transformations to the
// program.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/DomTreeUpdater.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/InstructionSimplify.h"
32 #include "llvm/Analysis/LazyValueInfo.h"
33 #include "llvm/Analysis/MemoryBuiltins.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/Analysis/VectorUtils.h"
38 #include "llvm/BinaryFormat/Dwarf.h"
39 #include "llvm/IR/Argument.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/IR/CFG.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/ConstantRange.h"
45 #include "llvm/IR/Constants.h"
46 #include "llvm/IR/DIBuilder.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfoMetadata.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/DerivedTypes.h"
51 #include "llvm/IR/Dominators.h"
52 #include "llvm/IR/Function.h"
53 #include "llvm/IR/GetElementPtrTypeIterator.h"
54 #include "llvm/IR/GlobalObject.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InstrTypes.h"
57 #include "llvm/IR/Instruction.h"
58 #include "llvm/IR/Instructions.h"
59 #include "llvm/IR/IntrinsicInst.h"
60 #include "llvm/IR/Intrinsics.h"
61 #include "llvm/IR/LLVMContext.h"
62 #include "llvm/IR/MDBuilder.h"
63 #include "llvm/IR/Metadata.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/Operator.h"
66 #include "llvm/IR/PatternMatch.h"
67 #include "llvm/IR/PseudoProbe.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/KnownBits.h"
77 #include "llvm/Support/raw_ostream.h"
78 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
79 #include "llvm/Transforms/Utils/ValueMapper.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <climits>
83 #include <cstdint>
84 #include <iterator>
85 #include <map>
86 #include <utility>
87 
88 using namespace llvm;
89 using namespace llvm::PatternMatch;
90 
91 #define DEBUG_TYPE "local"
92 
93 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
STATISTIC(NumPHICSEs, "Number of PHIs that got CSE'd");
95 
96 static cl::opt<bool> PHICSEDebugHash(
97     "phicse-debug-hash",
98 #ifdef EXPENSIVE_CHECKS
99     cl::init(true),
100 #else
101     cl::init(false),
102 #endif
103     cl::Hidden,
104     cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
105              "function is well-behaved w.r.t. its isEqual predicate"));
106 
107 static cl::opt<unsigned> PHICSENumPHISmallSize(
108     "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109     cl::desc(
110         "When the basic block contains not more than this number of PHI nodes, "
111         "perform a (faster!) exhaustive search instead of set-driven one."));
112 
// Max recursion depth for collectBitParts, used when detecting bswap and
// bitreverse idioms.
115 static const unsigned BitPartRecursionMaxDepth = 64;
116 
117 //===----------------------------------------------------------------------===//
118 //  Local constant propagation.
119 //
120 
121 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
122 /// constant value, convert it into an unconditional branch to the constant
123 /// destination.  This is a nontrivial operation because the successors of this
124 /// basic block must have their PHI nodes updated.
125 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
126 /// conditions and indirectbr addresses this might make dead if
127 /// DeleteDeadConditions is true.
128 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
129                                   const TargetLibraryInfo *TLI,
130                                   DomTreeUpdater *DTU) {
131   Instruction *T = BB->getTerminator();
132   IRBuilder<> Builder(T);
133 
  // Branch - See if we are conditionally branching on a constant.
135   if (auto *BI = dyn_cast<BranchInst>(T)) {
136     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
137 
138     BasicBlock *Dest1 = BI->getSuccessor(0);
139     BasicBlock *Dest2 = BI->getSuccessor(1);
140 
141     if (Dest2 == Dest1) {       // Conditional branch to same location?
142       // This branch matches something like this:
143       //     br bool %cond, label %Dest, label %Dest
144       // and changes it into:  br label %Dest
145 
146       // Let the basic block know that we are letting go of one copy of it.
147       assert(BI->getParent() && "Terminator not inserted in block!");
148       Dest1->removePredecessor(BI->getParent());
149 
150       // Replace the conditional branch with an unconditional one.
151       BranchInst *NewBI = Builder.CreateBr(Dest1);
152 
153       // Transfer the metadata to the new branch instruction.
154       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
155                                 LLVMContext::MD_annotation});
156 
157       Value *Cond = BI->getCondition();
158       BI->eraseFromParent();
159       if (DeleteDeadConditions)
160         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
161       return true;
162     }
163 
164     if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
165       // Are we branching on constant?
166       // YES.  Change to unconditional branch...
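      // For example, this turns:
      //     br i1 true, label %Dest1, label %Dest2
      // into:
      //     br label %Dest1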
167       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
168       BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
169 
      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
172       OldDest->removePredecessor(BB);
173 
174       // Replace the conditional branch with an unconditional one.
175       BranchInst *NewBI = Builder.CreateBr(Destination);
176 
177       // Transfer the metadata to the new branch instruction.
178       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
179                                 LLVMContext::MD_annotation});
180 
181       BI->eraseFromParent();
182       if (DTU)
183         DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
184       return true;
185     }
186 
187     return false;
188   }
189 
190   if (auto *SI = dyn_cast<SwitchInst>(T)) {
191     // If we are switching on a constant, we can convert the switch to an
192     // unconditional branch.
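    // For example, when the condition is a matching constant, this turns:
    //     switch i32 1, label %Default [ i32 1, label %Case1 ]
    // into:
    //     br label %Case1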
193     auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
194     BasicBlock *DefaultDest = SI->getDefaultDest();
195     BasicBlock *TheOnlyDest = DefaultDest;
196 
197     // If the default is unreachable, ignore it when searching for TheOnlyDest.
198     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
199         SI->getNumCases() > 0) {
200       TheOnlyDest = SI->case_begin()->getCaseSuccessor();
201     }
202 
203     bool Changed = false;
204 
205     // Figure out which case it goes to.
206     for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
207       // Found case matching a constant operand?
208       if (i->getCaseValue() == CI) {
209         TheOnlyDest = i->getCaseSuccessor();
210         break;
211       }
212 
213       // Check to see if this branch is going to the same place as the default
214       // dest.  If so, eliminate it as an explicit compare.
215       if (i->getCaseSuccessor() == DefaultDest) {
216         MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
217         unsigned NCases = SI->getNumCases();
218         // Fold the case metadata into the default if there will be any branches
219         // left, unless the metadata doesn't match the switch.
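        // For example, with branch weights {default: 10, case0: 3, case1: 7},
        // removing case0 leaves {default: 13, case1: 7}.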
220         if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
221           // Collect branch weights into a vector.
222           SmallVector<uint32_t, 8> Weights;
223           for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
224                ++MD_i) {
225             auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
226             Weights.push_back(CI->getValue().getZExtValue());
227           }
228           // Merge weight of this case to the default weight.
229           unsigned idx = i->getCaseIndex();
230           Weights[0] += Weights[idx+1];
231           // Remove weight for this case.
232           std::swap(Weights[idx+1], Weights.back());
233           Weights.pop_back();
234           SI->setMetadata(LLVMContext::MD_prof,
235                           MDBuilder(BB->getContext()).
236                           createBranchWeights(Weights));
237         }
238         // Remove this entry.
239         BasicBlock *ParentBB = SI->getParent();
240         DefaultDest->removePredecessor(ParentBB);
241         i = SI->removeCase(i);
242         e = SI->case_end();
243         Changed = true;
244         continue;
245       }
246 
247       // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two
      // non-equal destinations.
250       if (i->getCaseSuccessor() != TheOnlyDest)
251         TheOnlyDest = nullptr;
252 
253       // Increment this iterator as we haven't removed the case.
254       ++i;
255     }
256 
257     if (CI && !TheOnlyDest) {
      // We are branching on a constant, but it did not match any of the cases;
      // branch to the default successor.
260       TheOnlyDest = SI->getDefaultDest();
261     }
262 
263     // If we found a single destination that we can fold the switch into, do so
264     // now.
265     if (TheOnlyDest) {
266       // Insert the new branch.
267       Builder.CreateBr(TheOnlyDest);
268       BasicBlock *BB = SI->getParent();
269 
270       SmallSet<BasicBlock *, 8> RemovedSuccessors;
271 
272       // Remove entries from PHI nodes which we no longer branch to...
273       BasicBlock *SuccToKeep = TheOnlyDest;
274       for (BasicBlock *Succ : successors(SI)) {
275         if (DTU && Succ != TheOnlyDest)
276           RemovedSuccessors.insert(Succ);
        // Is this the one successor we are keeping?
278         if (Succ == SuccToKeep) {
279           SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
280         } else {
281           Succ->removePredecessor(BB);
282         }
283       }
284 
285       // Delete the old switch.
286       Value *Cond = SI->getCondition();
287       SI->eraseFromParent();
288       if (DeleteDeadConditions)
289         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
290       if (DTU) {
291         std::vector<DominatorTree::UpdateType> Updates;
292         Updates.reserve(RemovedSuccessors.size());
293         for (auto *RemovedSuccessor : RemovedSuccessors)
294           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
295         DTU->applyUpdates(Updates);
296       }
297       return true;
298     }
299 
300     if (SI->getNumCases() == 1) {
301       // Otherwise, we can fold this switch into a conditional branch
302       // instruction if it has only one non-default destination.
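      // For example:
      //     switch i32 %x, label %Default [ i32 5, label %Case ]
      // becomes:
      //     %cond = icmp eq i32 %x, 5
      //     br i1 %cond, label %Case, label %Default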
303       auto FirstCase = *SI->case_begin();
304       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
305           FirstCase.getCaseValue(), "cond");
306 
307       // Insert the new branch.
308       BranchInst *NewBr = Builder.CreateCondBr(Cond,
309                                                FirstCase.getCaseSuccessor(),
310                                                SI->getDefaultDest());
311       MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
312       if (MD && MD->getNumOperands() == 3) {
313         ConstantInt *SICase =
314             mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
315         ConstantInt *SIDef =
316             mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
317         assert(SICase && SIDef);
318         // The TrueWeight should be the weight for the single case of SI.
319         NewBr->setMetadata(LLVMContext::MD_prof,
320                         MDBuilder(BB->getContext()).
321                         createBranchWeights(SICase->getValue().getZExtValue(),
322                                             SIDef->getValue().getZExtValue()));
323       }
324 
325       // Update make.implicit metadata to the newly-created conditional branch.
326       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
327       if (MakeImplicitMD)
328         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
329 
330       // Delete the old switch.
331       SI->eraseFromParent();
332       return true;
333     }
334     return Changed;
335   }
336 
337   if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
338     // indirectbr blockaddress(@F, @BB) -> br label @BB
339     if (auto *BA =
340           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
341       BasicBlock *TheOnlyDest = BA->getBasicBlock();
342       SmallSet<BasicBlock *, 8> RemovedSuccessors;
343 
344       // Insert the new branch.
345       Builder.CreateBr(TheOnlyDest);
346 
347       BasicBlock *SuccToKeep = TheOnlyDest;
348       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
349         BasicBlock *DestBB = IBI->getDestination(i);
350         if (DTU && DestBB != TheOnlyDest)
351           RemovedSuccessors.insert(DestBB);
352         if (IBI->getDestination(i) == SuccToKeep) {
353           SuccToKeep = nullptr;
354         } else {
355           DestBB->removePredecessor(BB);
356         }
357       }
358       Value *Address = IBI->getAddress();
359       IBI->eraseFromParent();
360       if (DeleteDeadConditions)
361         // Delete pointer cast instructions.
362         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
363 
364       // Also zap the blockaddress constant if there are no users remaining,
365       // otherwise the destination is still marked as having its address taken.
366       if (BA->use_empty())
367         BA->destroyConstant();
368 
369       // If we didn't find our destination in the IBI successor list, then we
370       // have undefined behavior.  Replace the unconditional branch with an
371       // 'unreachable' instruction.
372       if (SuccToKeep) {
373         BB->getTerminator()->eraseFromParent();
374         new UnreachableInst(BB->getContext(), BB);
375       }
376 
377       if (DTU) {
378         std::vector<DominatorTree::UpdateType> Updates;
379         Updates.reserve(RemovedSuccessors.size());
380         for (auto *RemovedSuccessor : RemovedSuccessors)
381           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
382         DTU->applyUpdates(Updates);
383       }
384       return true;
385     }
386   }
387 
388   return false;
389 }
390 
391 //===----------------------------------------------------------------------===//
392 //  Local dead code elimination.
393 //
394 
395 /// isInstructionTriviallyDead - Return true if the result produced by the
396 /// instruction is not used, and the instruction has no side effects.
397 ///
398 bool llvm::isInstructionTriviallyDead(Instruction *I,
399                                       const TargetLibraryInfo *TLI) {
400   if (!I->use_empty())
401     return false;
402   return wouldInstructionBeTriviallyDead(I, TLI);
403 }
404 
405 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
406                                            const TargetLibraryInfo *TLI) {
407   if (I->isTerminator())
408     return false;
409 
410   // We don't want the landingpad-like instructions removed by anything this
411   // general.
412   if (I->isEHPad())
413     return false;
414 
415   // We don't want debug info removed by anything this general, unless
416   // debug info is empty.
417   if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
418     if (DDI->getAddress())
419       return false;
420     return true;
421   }
422   if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
423     if (DVI->hasArgList() || DVI->getValue(0))
424       return false;
425     return true;
426   }
427   if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
428     if (DLI->getLabel())
429       return false;
430     return true;
431   }
432 
433   if (!I->willReturn())
434     return false;
435 
436   if (!I->mayHaveSideEffects())
437     return true;
438 
439   // Special case intrinsics that "may have side effects" but can be deleted
440   // when dead.
441   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
442     // Safe to delete llvm.stacksave and launder.invariant.group if dead.
443     if (II->getIntrinsicID() == Intrinsic::stacksave ||
444         II->getIntrinsicID() == Intrinsic::launder_invariant_group)
445       return true;
446 
447     if (II->isLifetimeStartOrEnd()) {
448       auto *Arg = II->getArgOperand(1);
      // Lifetime intrinsics are dead when their pointer operand is undef.
450       if (isa<UndefValue>(Arg))
451         return true;
      // If the pointer operand is an alloca, global, or argument and its only
      // uses are lifetime intrinsics, then the intrinsics are dead.
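      // For example, if an alloca is used only by lifetime.start/lifetime.end
      // markers, those markers protect nothing and can be removed.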
454       if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
455         return llvm::all_of(Arg->uses(), [](Use &Use) {
456           if (IntrinsicInst *IntrinsicUse =
457                   dyn_cast<IntrinsicInst>(Use.getUser()))
458             return IntrinsicUse->isLifetimeStartOrEnd();
459           return false;
460         });
461       return false;
462     }
463 
464     // Assumptions are dead if their condition is trivially true.  Guards on
465     // true are operationally no-ops.  In the future we can consider more
466     // sophisticated tradeoffs for guards considering potential for check
467     // widening, but for now we keep things simple.
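    // For example, 'call void @llvm.assume(i1 true)' conveys no information
    // and can be removed.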
468     if ((II->getIntrinsicID() == Intrinsic::assume &&
469          isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) ||
470         II->getIntrinsicID() == Intrinsic::experimental_guard) {
471       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
472         return !Cond->isZero();
473 
474       return false;
475     }
476   }
477 
478   if (isAllocLikeFn(I, TLI))
479     return true;
480 
481   if (CallInst *CI = isFreeCall(I, TLI))
482     if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
483       return C->isNullValue() || isa<UndefValue>(C);
484 
485   if (auto *Call = dyn_cast<CallBase>(I))
486     if (isMathLibCallNoop(Call, TLI))
487       return true;
488 
489   return false;
490 }
491 
492 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
493 /// trivially dead instruction, delete it.  If that makes any of its operands
494 /// trivially dead, delete them too, recursively.  Return true if any
495 /// instructions were deleted.
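/// For example, if V is an unused 'mul' and one of its operands is an 'add'
/// with no other users, both instructions are erased.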
496 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
497     Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
498     std::function<void(Value *)> AboutToDeleteCallback) {
499   Instruction *I = dyn_cast<Instruction>(V);
500   if (!I || !isInstructionTriviallyDead(I, TLI))
501     return false;
502 
503   SmallVector<WeakTrackingVH, 16> DeadInsts;
504   DeadInsts.push_back(I);
505   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
506                                              AboutToDeleteCallback);
507 
508   return true;
509 }
510 
511 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
512     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
513     MemorySSAUpdater *MSSAU,
514     std::function<void(Value *)> AboutToDeleteCallback) {
515   unsigned S = 0, E = DeadInsts.size(), Alive = 0;
516   for (; S != E; ++S) {
517     auto *I = cast<Instruction>(DeadInsts[S]);
518     if (!isInstructionTriviallyDead(I)) {
519       DeadInsts[S] = nullptr;
520       ++Alive;
521     }
522   }
523   if (Alive == E)
524     return false;
525   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
526                                              AboutToDeleteCallback);
527   return true;
528 }
529 
530 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
531     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
532     MemorySSAUpdater *MSSAU,
533     std::function<void(Value *)> AboutToDeleteCallback) {
534   // Process the dead instruction list until empty.
535   while (!DeadInsts.empty()) {
536     Value *V = DeadInsts.pop_back_val();
537     Instruction *I = cast_or_null<Instruction>(V);
538     if (!I)
539       continue;
540     assert(isInstructionTriviallyDead(I, TLI) &&
541            "Live instruction found in dead worklist!");
542     assert(I->use_empty() && "Instructions with uses are not dead.");
543 
544     // Don't lose the debug info while deleting the instructions.
545     salvageDebugInfo(*I);
546 
547     if (AboutToDeleteCallback)
548       AboutToDeleteCallback(I);
549 
550     // Null out all of the instruction's operands to see if any operand becomes
551     // dead as we go.
552     for (Use &OpU : I->operands()) {
553       Value *OpV = OpU.get();
554       OpU.set(nullptr);
555 
556       if (!OpV->use_empty())
557         continue;
558 
559       // If the operand is an instruction that became dead as we nulled out the
560       // operand, and if it is 'trivially' dead, delete it in a future loop
561       // iteration.
562       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
563         if (isInstructionTriviallyDead(OpI, TLI))
564           DeadInsts.push_back(OpI);
565     }
566     if (MSSAU)
567       MSSAU->removeMemoryAccess(I);
568 
569     I->eraseFromParent();
570   }
571 }
572 
573 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
574   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
575   findDbgUsers(DbgUsers, I);
576   for (auto *DII : DbgUsers) {
577     Value *Undef = UndefValue::get(I->getType());
578     DII->replaceVariableLocationOp(I, Undef);
579   }
580   return !DbgUsers.empty();
581 }
582 
583 /// areAllUsesEqual - Check whether the uses of a value are all the same.
584 /// This is similar to Instruction::hasOneUse() except this will also return
585 /// true when there are no uses or multiple uses that all refer to the same
586 /// value.
587 static bool areAllUsesEqual(Instruction *I) {
588   Value::user_iterator UI = I->user_begin();
589   Value::user_iterator UE = I->user_end();
590   if (UI == UE)
591     return true;
592 
593   User *TheUse = *UI;
594   for (++UI; UI != UE; ++UI) {
595     if (*UI != TheUse)
596       return false;
597   }
598   return true;
599 }
600 
601 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
602 /// dead PHI node, due to being a def-use chain of single-use nodes that
603 /// either forms a cycle or is terminated by a trivially dead instruction,
604 /// delete it.  If that makes any of its operands trivially dead, delete them
605 /// too, recursively.  Return true if a change was made.
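/// For example, a pair of phis whose only user is the other one:
///     %a = phi i32 [ %b, %bb1 ], [ 0, %entry ]
///     %b = phi i32 [ %a, %bb2 ]
/// forms a dead cycle, and both are deleted.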
606 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
607                                         const TargetLibraryInfo *TLI,
608                                         llvm::MemorySSAUpdater *MSSAU) {
609   SmallPtrSet<Instruction*, 4> Visited;
610   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
611        I = cast<Instruction>(*I->user_begin())) {
612     if (I->use_empty())
613       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
614 
615     // If we find an instruction more than once, we're on a cycle that
616     // won't prove fruitful.
617     if (!Visited.insert(I).second) {
618       // Break the cycle and delete the instruction and its operands.
619       I->replaceAllUsesWith(UndefValue::get(I->getType()));
620       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
621       return true;
622     }
623   }
624   return false;
625 }
626 
627 static bool
628 simplifyAndDCEInstruction(Instruction *I,
629                           SmallSetVector<Instruction *, 16> &WorkList,
630                           const DataLayout &DL,
631                           const TargetLibraryInfo *TLI) {
632   if (isInstructionTriviallyDead(I, TLI)) {
633     salvageDebugInfo(*I);
634 
635     // Null out all of the instruction's operands to see if any operand becomes
636     // dead as we go.
637     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
638       Value *OpV = I->getOperand(i);
639       I->setOperand(i, nullptr);
640 
641       if (!OpV->use_empty() || I == OpV)
642         continue;
643 
644       // If the operand is an instruction that became dead as we nulled out the
645       // operand, and if it is 'trivially' dead, delete it in a future loop
646       // iteration.
647       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
648         if (isInstructionTriviallyDead(OpI, TLI))
649           WorkList.insert(OpI);
650     }
651 
652     I->eraseFromParent();
653 
654     return true;
655   }
656 
657   if (Value *SimpleV = SimplifyInstruction(I, DL)) {
658     // Add the users to the worklist. CAREFUL: an instruction can use itself,
659     // in the case of a phi node.
660     for (User *U : I->users()) {
661       if (U != I) {
662         WorkList.insert(cast<Instruction>(U));
663       }
664     }
665 
666     // Replace the instruction with its simplified value.
667     bool Changed = false;
668     if (!I->use_empty()) {
669       I->replaceAllUsesWith(SimpleV);
670       Changed = true;
671     }
672     if (isInstructionTriviallyDead(I, TLI)) {
673       I->eraseFromParent();
674       Changed = true;
675     }
676     return Changed;
677   }
678   return false;
679 }
680 
681 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
682 /// simplify any instructions in it and recursively delete dead instructions.
683 ///
/// This returns true if it changed the code. Note that it can delete
/// instructions in other blocks as well as in this block.
686 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
687                                        const TargetLibraryInfo *TLI) {
688   bool MadeChange = false;
689   const DataLayout &DL = BB->getModule()->getDataLayout();
690 
691 #ifndef NDEBUG
692   // In debug builds, ensure that the terminator of the block is never replaced
693   // or deleted by these simplifications. The idea of simplification is that it
694   // cannot introduce new instructions, and there is no way to replace the
695   // terminator of a block without introducing a new instruction.
696   AssertingVH<Instruction> TerminatorVH(&BB->back());
697 #endif
698 
699   SmallSetVector<Instruction *, 16> WorkList;
700   // Iterate over the original function, only adding insts to the worklist
701   // if they actually need to be revisited. This avoids having to pre-init
702   // the worklist with the entire function's worth of instructions.
703   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
704        BI != E;) {
705     assert(!BI->isTerminator());
706     Instruction *I = &*BI;
707     ++BI;
708 
709     // We're visiting this instruction now, so make sure it's not in the
710     // worklist from an earlier visit.
711     if (!WorkList.count(I))
712       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
713   }
714 
715   while (!WorkList.empty()) {
716     Instruction *I = WorkList.pop_back_val();
717     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
718   }
719   return MadeChange;
720 }
721 
722 //===----------------------------------------------------------------------===//
723 //  Control Flow Graph Restructuring.
724 //
725 
726 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
727                                        DomTreeUpdater *DTU) {
728 
729   // If BB has single-entry PHI nodes, fold them.
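  // A single-entry phi such as
  //     %p = phi i32 [ %v, %PredBB ]
  // is simply a copy of %v and can be replaced by it.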
730   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
731     Value *NewVal = PN->getIncomingValue(0);
    // Replace a self-referencing PHI with undef; it must be dead.
733     if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
734     PN->replaceAllUsesWith(NewVal);
735     PN->eraseFromParent();
736   }
737 
738   BasicBlock *PredBB = DestBB->getSinglePredecessor();
739   assert(PredBB && "Block doesn't have a single predecessor!");
740 
741   bool ReplaceEntryBB = false;
742   if (PredBB == &DestBB->getParent()->getEntryBlock())
743     ReplaceEntryBB = true;
744 
745   // DTU updates: Collect all the edges that enter
746   // PredBB. These dominator edges will be redirected to DestBB.
747   SmallVector<DominatorTree::UpdateType, 32> Updates;
748 
749   if (DTU) {
750     SmallPtrSet<BasicBlock *, 2> PredsOfPredBB(pred_begin(PredBB),
751                                                pred_end(PredBB));
752     Updates.reserve(Updates.size() + 2 * PredsOfPredBB.size() + 1);
753     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
754       // This predecessor of PredBB may already have DestBB as a successor.
755       if (PredOfPredBB != PredBB)
756         Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
757     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
758       Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
759     Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
760   }
761 
762   // Zap anything that took the address of DestBB.  Not doing this will give the
763   // address an invalid value.
764   if (DestBB->hasAddressTaken()) {
765     BlockAddress *BA = BlockAddress::get(DestBB);
766     Constant *Replacement =
767       ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
768     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
769                                                      BA->getType()));
770     BA->destroyConstant();
771   }
772 
773   // Anything that branched to PredBB now branches to DestBB.
774   PredBB->replaceAllUsesWith(DestBB);
775 
776   // Splice all the instructions from PredBB to DestBB.
777   PredBB->getTerminator()->eraseFromParent();
778   DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
779   new UnreachableInst(PredBB->getContext(), PredBB);
780 
781   // If the PredBB is the entry block of the function, move DestBB up to
782   // become the entry block after we erase PredBB.
783   if (ReplaceEntryBB)
784     DestBB->moveAfter(PredBB);
785 
786   if (DTU) {
787     assert(PredBB->getInstList().size() == 1 &&
788            isa<UnreachableInst>(PredBB->getTerminator()) &&
789            "The successor list of PredBB isn't empty before "
790            "applying corresponding DTU updates.");
791     DTU->applyUpdatesPermissive(Updates);
792     DTU->deleteBB(PredBB);
793     // Recalculation of DomTree is needed when updating a forward DomTree and
794     // the Entry BB is replaced.
795     if (ReplaceEntryBB && DTU->hasDomTree()) {
796       // The entry block was removed and there is no external interface for
797       // the dominator tree to be notified of this change. In this corner-case
798       // we recalculate the entire tree.
799       DTU->recalculate(*(DestBB->getParent()));
800     }
801   }
802 
803   else {
804     PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
805   }
806 }
807 
808 /// Return true if we can choose one of these values to use in place of the
809 /// other. Note that we will always choose the non-undef value to keep.
810 static bool CanMergeValues(Value *First, Value *Second) {
811   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
812 }
813 
814 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
815 /// branch to Succ, into Succ.
816 ///
817 /// Assumption: Succ is the single successor for BB.
818 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
819   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
820 
821   LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
822                     << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB, and merging
  // is always safe.
825   if (Succ->getSinglePredecessor()) return true;
826 
827   // Make a list of the predecessors of BB
828   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
829 
830   // Look at all the phi nodes in Succ, to see if they present a conflict when
831   // merging these blocks
832   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
833     PHINode *PN = cast<PHINode>(I);
834 
    // If the incoming value from BB is itself a PHINode in BB that has the
    // same incoming value for *PI as PN does, we can merge the phi nodes, and
    // then the blocks can still be merged.
838     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
839     if (BBPN && BBPN->getParent() == BB) {
840       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
841         BasicBlock *IBB = PN->getIncomingBlock(PI);
842         if (BBPreds.count(IBB) &&
843             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
844                             PN->getIncomingValue(PI))) {
845           LLVM_DEBUG(dbgs()
846                      << "Can't fold, phi node " << PN->getName() << " in "
847                      << Succ->getName() << " is conflicting with "
848                      << BBPN->getName() << " with regard to common predecessor "
849                      << IBB->getName() << "\n");
850           return false;
851         }
852       }
853     } else {
854       Value* Val = PN->getIncomingValueForBlock(BB);
855       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
856         // See if the incoming value for the common predecessor is equal to the
857         // one for BB, in which case this phi node will not prevent the merging
858         // of the block.
859         BasicBlock *IBB = PN->getIncomingBlock(PI);
860         if (BBPreds.count(IBB) &&
861             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
862           LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
863                             << " in " << Succ->getName()
864                             << " is conflicting with regard to common "
865                             << "predecessor " << IBB->getName() << "\n");
866           return false;
867         }
868       }
869     }
870   }
871 
872   return true;
873 }
874 
875 using PredBlockVector = SmallVector<BasicBlock *, 16>;
876 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
877 
878 /// Determines the value to use as the phi node input for a block.
879 ///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
883 ///
884 /// \param OldVal The value we are considering selecting.
885 /// \param BB The block that the value flows in from.
886 /// \param IncomingValues A map from block-to-value for other phi inputs
887 /// that we have examined.
888 ///
889 /// \returns the selected value.
890 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
891                                           IncomingValueMap &IncomingValues) {
892   if (!isa<UndefValue>(OldVal)) {
893     assert((!IncomingValues.count(BB) ||
894             IncomingValues.find(BB)->second == OldVal) &&
895            "Expected OldVal to match incoming value from BB!");
896 
897     IncomingValues.insert(std::make_pair(BB, OldVal));
898     return OldVal;
899   }
900 
901   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
902   if (It != IncomingValues.end()) return It->second;
903 
904   return OldVal;
905 }
906 
907 /// Create a map from block to value for the operands of a
908 /// given phi.
909 ///
910 /// Create a map from block to value for each non-undef value flowing
911 /// into \p PN.
912 ///
913 /// \param PN The phi we are collecting the map for.
914 /// \param IncomingValues [out] The map from block to value for this phi.
915 static void gatherIncomingValuesToPhi(PHINode *PN,
916                                       IncomingValueMap &IncomingValues) {
917   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
918     BasicBlock *BB = PN->getIncomingBlock(i);
919     Value *V = PN->getIncomingValue(i);
920 
921     if (!isa<UndefValue>(V))
922       IncomingValues.insert(std::make_pair(BB, V));
923   }
924 }
925 
926 /// Replace the incoming undef values to a phi with the values
927 /// from a block-to-value map.
928 ///
929 /// \param PN The phi we are replacing the undefs in.
930 /// \param IncomingValues A map from block to value.
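/// For example, if a block contributes undef to \p PN but a defined value %v
/// is already known to flow from that block, the undef operand is rewritten
/// to %v.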
931 static void replaceUndefValuesInPhi(PHINode *PN,
932                                     const IncomingValueMap &IncomingValues) {
933   SmallVector<unsigned> TrueUndefOps;
934   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
935     Value *V = PN->getIncomingValue(i);
936 
937     if (!isa<UndefValue>(V)) continue;
938 
939     BasicBlock *BB = PN->getIncomingBlock(i);
940     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
941 
942     // Keep track of undef/poison incoming values. Those must match, so we fix
943     // them up below if needed.
944     // Note: this is conservatively correct, but we could try harder and group
945     // the undef values per incoming basic block.
946     if (It == IncomingValues.end()) {
947       TrueUndefOps.push_back(i);
948       continue;
949     }
950 
951     // There is a defined value for this incoming block, so map this undef
952     // incoming value to the defined value.
953     PN->setIncomingValue(i, It->second);
954   }
955 
956   // If there are both undef and poison values incoming, then convert those
957   // values to undef. It is invalid to have different values for the same
958   // incoming block.
959   unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
960     return isa<PoisonValue>(PN->getIncomingValue(i));
961   });
962   if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
963     for (unsigned i : TrueUndefOps)
964       PN->setIncomingValue(i, UndefValue::get(PN->getType()));
965   }
966 }
967 
968 /// Replace a value flowing from a block to a phi with
969 /// potentially multiple instances of that value flowing from the
970 /// block's predecessors to the phi.
971 ///
972 /// \param BB The block with the value flowing into the phi.
973 /// \param BBPreds The predecessors of BB.
974 /// \param PN The phi that we are updating.
975 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
976                                                 const PredBlockVector &BBPreds,
977                                                 PHINode *PN) {
978   Value *OldVal = PN->removeIncomingValue(BB, false);
979   assert(OldVal && "No entry in PHI for Pred BB!");
980 
981   IncomingValueMap IncomingValues;
982 
983   // We are merging two blocks - BB, and the block containing PN - and
984   // as a result we need to redirect edges from the predecessors of BB
985   // to go to the block containing PN, and update PN
986   // accordingly. Since we allow merging blocks in the case where the
987   // predecessor and successor blocks both share some predecessors,
988   // and where some of those common predecessors might have undef
989   // values flowing into PN, we want to rewrite those values to be
990   // consistent with the non-undef values.
991 
992   gatherIncomingValuesToPhi(PN, IncomingValues);
993 
994   // If this incoming value is one of the PHI nodes in BB, the new entries
995   // in the PHI node are the entries from the old PHI.
996   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
997     PHINode *OldValPN = cast<PHINode>(OldVal);
998     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
999       // Note that, since we are merging phi nodes and BB and Succ might
1000       // have common predecessors, we could end up with a phi node with
1001       // identical incoming branches. This will be cleaned up later (and
1002       // will trigger asserts if we try to clean it up now, without also
1003       // simplifying the corresponding conditional branch).
1004       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1005       Value *PredVal = OldValPN->getIncomingValue(i);
1006       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
1007                                                     IncomingValues);
1008 
1009       // And add a new incoming value for this predecessor for the
1010       // newly retargeted branch.
1011       PN->addIncoming(Selected, PredBB);
1012     }
1013   } else {
1014     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1015       // Update existing incoming values in PN for this
1016       // predecessor of BB.
1017       BasicBlock *PredBB = BBPreds[i];
1018       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
1019                                                     IncomingValues);
1020 
1021       // And add a new incoming value for this predecessor for the
1022       // newly retargeted branch.
1023       PN->addIncoming(Selected, PredBB);
1024     }
1025   }
1026 
1027   replaceUndefValuesInPhi(PN, IncomingValues);
1028 }
1029 
1030 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1031                                                    DomTreeUpdater *DTU) {
1032   assert(BB != &BB->getParent()->getEntryBlock() &&
1033          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1034 
1035   // We can't eliminate infinite loops.
1036   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1037   if (BB == Succ) return false;
1038 
1039   // Check to see if merging these blocks would cause conflicts for any of the
1040   // phi nodes in BB or Succ. If not, we can safely merge.
1041   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
1042 
1043   // Check for cases where Succ has multiple predecessors and a PHI node in BB
1044   // has uses which will not disappear when the PHI nodes are merged.  It is
1045   // possible to handle such cases, but difficult: it requires checking whether
1046   // BB dominates Succ, which is non-trivial to calculate in the case where
1047   // Succ has multiple predecessors.  Also, it requires checking whether
1048   // constructing the necessary self-referential PHI node doesn't introduce any
1049   // conflicts; this isn't too difficult, but the previous code for doing this
1050   // was incorrect.
1051   //
1052   // Note that if this check finds a live use, BB dominates Succ, so BB is
1053   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1054   // folding the branch isn't profitable in that case anyway.
1055   if (!Succ->getSinglePredecessor()) {
1056     BasicBlock::iterator BBI = BB->begin();
1057     while (isa<PHINode>(*BBI)) {
1058       for (Use &U : BBI->uses()) {
1059         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1060           if (PN->getIncomingBlock(U) != BB)
1061             return false;
1062         } else {
1063           return false;
1064         }
1065       }
1066       ++BBI;
1067     }
1068   }
1069 
1070   // We cannot fold the block if it's a branch to an already present callbr
1071   // successor because that creates duplicate successors.
1072   for (BasicBlock *PredBB : predecessors(BB)) {
1073     if (auto *CBI = dyn_cast<CallBrInst>(PredBB->getTerminator())) {
1074       if (Succ == CBI->getDefaultDest())
1075         return false;
1076       for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
1077         if (Succ == CBI->getIndirectDest(i))
1078           return false;
1079     }
1080   }
1081 
1082   LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1083 
1084   SmallVector<DominatorTree::UpdateType, 32> Updates;
1085   if (DTU) {
1086     // All predecessors of BB will be moved to Succ.
1087     SmallPtrSet<BasicBlock *, 8> PredsOfBB(pred_begin(BB), pred_end(BB));
1088     SmallPtrSet<BasicBlock *, 8> PredsOfSucc(pred_begin(Succ), pred_end(Succ));
1089     Updates.reserve(Updates.size() + 2 * PredsOfBB.size() + 1);
1090     for (auto *PredOfBB : PredsOfBB)
1091       // This predecessor of BB may already have Succ as a successor.
1092       if (!PredsOfSucc.contains(PredOfBB))
1093         Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1094     for (auto *PredOfBB : PredsOfBB)
1095       Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1096     Updates.push_back({DominatorTree::Delete, BB, Succ});
1097   }
1098 
1099   if (isa<PHINode>(Succ->begin())) {
    // If there is more than one predecessor of Succ, and there are PHI nodes
    // in the successor, then we need to add incoming edges for the PHI nodes.
1103     const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
1104 
1105     // Loop over all of the PHI nodes in the successor of BB.
1106     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1107       PHINode *PN = cast<PHINode>(I);
1108 
1109       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1110     }
1111   }
1112 
1113   if (Succ->getSinglePredecessor()) {
1114     // BB is the only predecessor of Succ, so Succ will end up with exactly
1115     // the same predecessors BB had.
1116 
1117     // Copy over any phi, debug or lifetime instruction.
1118     BB->getTerminator()->eraseFromParent();
1119     Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1120                                BB->getInstList());
1121   } else {
1122     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1123       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1124       assert(PN->use_empty() && "There shouldn't be any uses here!");
1125       PN->eraseFromParent();
1126     }
1127   }
1128 
1129   // If the unconditional branch we replaced contains llvm.loop metadata, we
1130   // add the metadata to the branch instructions in the predecessors.
1131   unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1132   Instruction *TI = BB->getTerminator();
1133   if (TI)
1134     if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1135       for (BasicBlock *Pred : predecessors(BB))
1136         Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1137 
  // For AutoFDO, since BB is going to be removed, we won't be able to sample
  // it. To avoid assigning a zero weight to BB, move all its pseudo probes
  // into Succ and mark them dangling. This gives the counts inference a chance
  // to compute a more reasonable weight for BB.
1142   moveAndDanglePseudoProbes(BB, &*Succ->getFirstInsertionPt());
1143 
1144   // Everything that jumped to BB now goes to Succ.
1145   BB->replaceAllUsesWith(Succ);
1146   if (!Succ->hasName()) Succ->takeName(BB);
1147 
1148   // Clear the successor list of BB to match updates applying to DTU later.
1149   if (BB->getTerminator())
1150     BB->getInstList().pop_back();
1151   new UnreachableInst(BB->getContext(), BB);
1152   assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1153                            "applying corresponding DTU updates.");
1154 
1155   if (DTU) {
1156     DTU->applyUpdates(Updates);
1157     DTU->deleteBB(BB);
1158   } else {
1159     BB->eraseFromParent(); // Delete the old basic block.
1160   }
1161   return true;
1162 }
1163 
1164 static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) {
1165   // This implementation doesn't currently consider undef operands
1166   // specially. Theoretically, two phis which are identical except for
1167   // one having an undef where the other doesn't could be collapsed.
1168 
1169   bool Changed = false;
1170 
1171   // Examine each PHI.
1172   // Note that increment of I must *NOT* be in the iteration_expression, since
1173   // we don't want to immediately advance when we restart from the beginning.
1174   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1175     ++I;
1176     // Is there an identical PHI node in this basic block?
    // Note that we only scan the PHIs following PN (the upper triangle of the
    // pairwise comparison); the earlier pairs have already been checked.
1179     for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1180       if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1181         continue;
1182       // A duplicate. Replace this PHI with the base PHI.
1183       ++NumPHICSEs;
1184       DuplicatePN->replaceAllUsesWith(PN);
1185       DuplicatePN->eraseFromParent();
1186       Changed = true;
1187 
1188       // The RAUW can change PHIs that we already visited.
1189       I = BB->begin();
1190       break; // Start over from the beginning.
1191     }
1192   }
1193   return Changed;
1194 }
1195 
1196 static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) {
1197   // This implementation doesn't currently consider undef operands
1198   // specially. Theoretically, two phis which are identical except for
1199   // one having an undef where the other doesn't could be collapsed.
1200 
1201   struct PHIDenseMapInfo {
1202     static PHINode *getEmptyKey() {
1203       return DenseMapInfo<PHINode *>::getEmptyKey();
1204     }
1205 
1206     static PHINode *getTombstoneKey() {
1207       return DenseMapInfo<PHINode *>::getTombstoneKey();
1208     }
1209 
1210     static bool isSentinel(PHINode *PN) {
1211       return PN == getEmptyKey() || PN == getTombstoneKey();
1212     }
1213 
1214     // WARNING: this logic must be kept in sync with
1215     //          Instruction::isIdenticalToWhenDefined()!
1216     static unsigned getHashValueImpl(PHINode *PN) {
1217       // Compute a hash value on the operands. Instcombine will likely have
1218       // sorted them, which helps expose duplicates, but we have to check all
1219       // the operands to be safe in case instcombine hasn't run.
1220       return static_cast<unsigned>(hash_combine(
1221           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1222           hash_combine_range(PN->block_begin(), PN->block_end())));
1223     }
1224 
1225     static unsigned getHashValue(PHINode *PN) {
1226 #ifndef NDEBUG
1227       // If -phicse-debug-hash was specified, return a constant -- this
1228       // will force all hashing to collide, so we'll exhaustively search
1229       // the table for a match, and the assertion in isEqual will fire if
1230       // there's a bug causing equal keys to hash differently.
1231       if (PHICSEDebugHash)
1232         return 0;
1233 #endif
1234       return getHashValueImpl(PN);
1235     }
1236 
1237     static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1238       if (isSentinel(LHS) || isSentinel(RHS))
1239         return LHS == RHS;
1240       return LHS->isIdenticalTo(RHS);
1241     }
1242 
1243     static bool isEqual(PHINode *LHS, PHINode *RHS) {
1244       // These comparisons are nontrivial, so assert that equality implies
1245       // hash equality (DenseMap demands this as an invariant).
1246       bool Result = isEqualImpl(LHS, RHS);
1247       assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1248              getHashValueImpl(LHS) == getHashValueImpl(RHS));
1249       return Result;
1250     }
1251   };
1252 
1253   // Set of unique PHINodes.
1254   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1255   PHISet.reserve(4 * PHICSENumPHISmallSize);
1256 
1257   // Examine each PHI.
1258   bool Changed = false;
1259   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1260     auto Inserted = PHISet.insert(PN);
1261     if (!Inserted.second) {
1262       // A duplicate. Replace this PHI with its duplicate.
1263       ++NumPHICSEs;
1264       PN->replaceAllUsesWith(*Inserted.first);
1265       PN->eraseFromParent();
1266       Changed = true;
1267 
1268       // The RAUW can change PHIs that we already visited. Start over from the
1269       // beginning.
1270       PHISet.clear();
1271       I = BB->begin();
1272     }
1273   }
1274 
1275   return Changed;
1276 }
1277 
1278 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1279   if (
1280 #ifndef NDEBUG
1281       !PHICSEDebugHash &&
1282 #endif
1283       hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1284     return EliminateDuplicatePHINodesNaiveImpl(BB);
1285   return EliminateDuplicatePHINodesSetBasedImpl(BB);
1286 }
1287 
1288 /// If the specified pointer points to an object that we control, try to modify
1289 /// the object's alignment to PrefAlign. Returns a minimum known alignment of
1290 /// the value after the operation, which may be lower than PrefAlign.
1291 ///
/// Increasing value alignment isn't often possible though. If alignment is
/// important, a more reliable approach is to simply align all global variables
/// and allocation instructions to their preferred alignment from the beginning.
1295 static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1296                                  const DataLayout &DL) {
1297   V = V->stripPointerCasts();
1298 
1299   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1300     // TODO: Ideally, this function would not be called if PrefAlign is smaller
1301     // than the current alignment, as the known bits calculation should have
1302     // already taken it into account. However, this is not always the case,
1303     // as computeKnownBits() has a depth limit, while stripPointerCasts()
1304     // doesn't.
1305     Align CurrentAlign = AI->getAlign();
1306     if (PrefAlign <= CurrentAlign)
1307       return CurrentAlign;
1308 
1309     // If the preferred alignment is greater than the natural stack alignment
1310     // then don't round up. This avoids dynamic stack realignment.
1311     if (DL.exceedsNaturalStackAlignment(PrefAlign))
1312       return CurrentAlign;
1313     AI->setAlignment(PrefAlign);
1314     return PrefAlign;
1315   }
1316 
1317   if (auto *GO = dyn_cast<GlobalObject>(V)) {
1318     // TODO: as above, this shouldn't be necessary.
1319     Align CurrentAlign = GO->getPointerAlignment(DL);
1320     if (PrefAlign <= CurrentAlign)
1321       return CurrentAlign;
1322 
1323     // If there is a large requested alignment and we can, bump up the alignment
1324     // of the global.  If the memory we set aside for the global may not be the
1325     // memory used by the final program then it is impossible for us to reliably
1326     // enforce the preferred alignment.
1327     if (!GO->canIncreaseAlignment())
1328       return CurrentAlign;
1329 
1330     GO->setAlignment(PrefAlign);
1331     return PrefAlign;
1332   }
1333 
1334   return Align(1);
1335 }
1336 
1337 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1338                                        const DataLayout &DL,
1339                                        const Instruction *CxtI,
1340                                        AssumptionCache *AC,
1341                                        const DominatorTree *DT) {
1342   assert(V->getType()->isPointerTy() &&
1343          "getOrEnforceKnownAlignment expects a pointer!");
1344 
1345   KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1346   unsigned TrailZ = Known.countMinTrailingZeros();
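  // E.g. if the low 4 bits of the pointer are known to be zero, the pointer
  // is at least 16-byte aligned.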
1347 
1348   // Avoid trouble with ridiculously large TrailZ values, such as
1349   // those computed from a null pointer.
1350   // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1351   TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1352 
1353   Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1354 
1355   if (PrefAlign && *PrefAlign > Alignment)
1356     Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1357 
1358   // No further adjustment is needed; return the best known alignment.
1359   return Alignment;
1360 }
1361 
1362 ///===---------------------------------------------------------------------===//
1363 ///  Dbg Intrinsic utilities
1364 ///
1365 
1366 /// See if there is a dbg.value intrinsic describing DIVar for the PHI node APN.
1367 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1368                              DIExpression *DIExpr,
1369                              PHINode *APN) {
1370   // Since we can't guarantee that the original dbg.declare intrinsic
1371   // is removed by LowerDbgDeclare(), we need to make sure that we are
1372   // not inserting the same dbg.value intrinsic over and over.
1373   SmallVector<DbgValueInst *, 1> DbgValues;
1374   findDbgValues(DbgValues, APN);
1375   for (auto *DVI : DbgValues) {
1376     assert(is_contained(DVI->getValues(), APN));
1377     if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1378       return true;
1379   }
1380   return false;
1381 }
1382 
1383 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1384 /// (or fragment of the variable) described by \p DII.
1385 ///
1386 /// This is primarily intended as a helper for the different
1387 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1388 /// converted describes an alloca'd variable, so we need to use the
1389 /// alloc size of the value when doing the comparison. E.g. an i1 value will be
1390 /// identified as covering an n-bit fragment, if the store size of i1 is at
1391 /// least n bits.
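///
/// An illustrative example: a dbg.declare (or dbg.addr) describing a 32-bit
/// fragment is covered by a value whose alloc size is 32 bits or more (e.g.
/// i32 or i64), but not by an i16 value.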
1392 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1393   const DataLayout &DL = DII->getModule()->getDataLayout();
1394   TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1395   if (Optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
1396     assert(!ValueSize.isScalable() &&
1397            "Fragments don't work on scalable types.");
1398     return ValueSize.getFixedSize() >= *FragmentSize;
1399   }
1400   // We can't always calculate the size of the DI variable (e.g. if it is a
1401   // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1402   // instead.
1403   if (DII->isAddressOfVariable()) {
1404     // DII should have exactly 1 location when it is an address.
1405     assert(DII->getNumVariableLocationOps() == 1 &&
1406            "address of variable must have exactly 1 location operand.");
1407     if (auto *AI =
1408             dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1409       if (Optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1410         assert(ValueSize.isScalable() == FragmentSize->isScalable() &&
1411                "Both sizes should agree on the scalable flag.");
1412         return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1413       }
1414     }
1415   }
1416   // Could not determine size of variable. Conservatively return false.
1417   return false;
1418 }
1419 
1420 /// Produce a DebugLoc to use for each dbg.declare/inst pair that is promoted
1421 /// to a dbg.value. Because no machine insts can come from debug intrinsics,
1422 /// only the scope and inlinedAt are significant. Zero line numbers are used in
1423 /// case this DebugLoc leaks into any adjacent instructions.
1424 static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
1425   // Original dbg.declare must have a location.
1426   const DebugLoc &DeclareLoc = DII->getDebugLoc();
1427   MDNode *Scope = DeclareLoc.getScope();
1428   DILocation *InlinedAt = DeclareLoc.getInlinedAt();
1429   // Produce an unknown location with the correct scope / inlinedAt fields.
1430   return DILocation::get(DII->getContext(), 0, 0, Scope, InlinedAt);
1431 }
1432 
1433 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1434 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
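///
/// An illustrative sketch of the result (names are arbitrary); the dbg.value
/// tracking the stored value is inserted immediately before the store:
/// \code
///   call void @llvm.dbg.value(metadata i32 %v, metadata !var, metadata !expr)
///   store i32 %v, i32* %x
/// \endcode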
1435 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1436                                            StoreInst *SI, DIBuilder &Builder) {
1437   assert(DII->isAddressOfVariable());
1438   auto *DIVar = DII->getVariable();
1439   assert(DIVar && "Missing variable");
1440   auto *DIExpr = DII->getExpression();
1441   Value *DV = SI->getValueOperand();
1442 
1443   DebugLoc NewLoc = getDebugValueLoc(DII, SI);
1444 
1445   if (!valueCoversEntireFragment(DV->getType(), DII)) {
1446     // FIXME: If storing to a part of the variable described by the dbg.declare,
1447     // then we want to insert a dbg.value for the corresponding fragment.
1448     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1449                       << *DII << '\n');
1450     // For now, when there is a store to parts of the variable (but we do not
1451     // know which part) we insert a dbg.value intrinsic to indicate that we
1452     // know nothing about the variable's content.
1453     DV = UndefValue::get(DV->getType());
1454     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1455     return;
1456   }
1457 
1458   Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1459 }
1460 
1461 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1462 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
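///
/// An illustrative sketch (names are arbitrary): for `%val = load i32, i32* %x`
/// with an associated dbg.declare on %x, a dbg.value tracking %val is inserted
/// immediately after the load.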
1463 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1464                                            LoadInst *LI, DIBuilder &Builder) {
1465   auto *DIVar = DII->getVariable();
1466   auto *DIExpr = DII->getExpression();
1467   assert(DIVar && "Missing variable");
1468 
1469   if (!valueCoversEntireFragment(LI->getType(), DII)) {
1470     // FIXME: If only referring to a part of the variable described by the
1471     // dbg.declare, then we want to insert a dbg.value for the corresponding
1472     // fragment.
1473     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1474                       << *DII << '\n');
1475     return;
1476   }
1477 
1478   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1479 
1480   // We are now tracking the loaded value instead of the address. In the
1481   // future if multi-location support is added to the IR, it might be
1482   // preferable to keep tracking both the loaded value and the original
1483   // address in case the alloca can not be elided.
1484   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1485       LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
1486   DbgValue->insertAfter(LI);
1487 }
1488 
1489 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1490 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1491 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1492                                            PHINode *APN, DIBuilder &Builder) {
1493   auto *DIVar = DII->getVariable();
1494   auto *DIExpr = DII->getExpression();
1495   assert(DIVar && "Missing variable");
1496 
1497   if (PhiHasDebugValue(DIVar, DIExpr, APN))
1498     return;
1499 
1500   if (!valueCoversEntireFragment(APN->getType(), DII)) {
1501     // FIXME: If only referring to a part of the variable described by the
1502     // dbg.declare, then we want to insert a dbg.value for the corresponding
1503     // fragment.
1504     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1505                       << *DII << '\n');
1506     return;
1507   }
1508 
1509   BasicBlock *BB = APN->getParent();
1510   auto InsertionPt = BB->getFirstInsertionPt();
1511 
1512   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1513 
1514   // The block may be a catchswitch block, which does not have a valid
1515   // insertion point.
1516   // FIXME: Insert dbg.value markers in the successors when appropriate.
1517   if (InsertionPt != BB->end())
1518     Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
1519 }
1520 
1521 /// Determine whether this alloca is either a VLA or an array.
1522 static bool isArray(AllocaInst *AI) {
1523   return AI->isArrayAllocation() ||
1524          (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1525 }
1526 
1527 /// Determine whether this alloca is a structure.
1528 static bool isStructure(AllocaInst *AI) {
1529   return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1530 }
1531 
1532 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
1533 /// set of llvm.dbg.value intrinsics.
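///
/// As a rough illustration: for a scalar `%x = alloca i32` described by a
/// dbg.declare, the dbg.declare is erased and each load/store of %x gets a
/// corresponding dbg.value, so the variable remains described even if the
/// alloca is later elided.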
1534 bool llvm::LowerDbgDeclare(Function &F) {
1535   bool Changed = false;
1536   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1537   SmallVector<DbgDeclareInst *, 4> Dbgs;
1538   for (auto &FI : F)
1539     for (Instruction &BI : FI)
1540       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1541         Dbgs.push_back(DDI);
1542 
1543   if (Dbgs.empty())
1544     return Changed;
1545 
1546   for (auto &I : Dbgs) {
1547     DbgDeclareInst *DDI = I;
1548     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1549     // If this is an alloca for a scalar variable, insert a dbg.value
1550     // at each load and store to the alloca and erase the dbg.declare.
1551     // The dbg.values allow tracking a variable even if it is not
1552     // stored on the stack, while the dbg.declare can only describe
1553     // the stack slot (and at a lexical-scope granularity). Later
1554     // passes will attempt to elide the stack slot.
1555     if (!AI || isArray(AI) || isStructure(AI))
1556       continue;
1557 
1558     // A volatile load/store means that the alloca can't be elided anyway.
1559     if (llvm::any_of(AI->users(), [](User *U) -> bool {
1560           if (LoadInst *LI = dyn_cast<LoadInst>(U))
1561             return LI->isVolatile();
1562           if (StoreInst *SI = dyn_cast<StoreInst>(U))
1563             return SI->isVolatile();
1564           return false;
1565         }))
1566       continue;
1567 
1568     SmallVector<const Value *, 8> WorkList;
1569     WorkList.push_back(AI);
1570     while (!WorkList.empty()) {
1571       const Value *V = WorkList.pop_back_val();
1572       for (auto &AIUse : V->uses()) {
1573         User *U = AIUse.getUser();
1574         if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1575           if (AIUse.getOperandNo() == 1)
1576             ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1577         } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1578           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1579         } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1580           // This is a call by-value or some other instruction that takes a
1581           // pointer to the variable. Insert a *value* intrinsic that describes
1582           // the variable by dereferencing the alloca.
1583           if (!CI->isLifetimeStartOrEnd()) {
1584             DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr);
1585             auto *DerefExpr =
1586                 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1587             DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1588                                         NewLoc, CI);
1589           }
1590         } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1591           if (BI->getType()->isPointerTy())
1592             WorkList.push_back(BI);
1593         }
1594       }
1595     }
1596     DDI->eraseFromParent();
1597     Changed = true;
1598   }
1599 
1600   if (Changed)
1601     for (BasicBlock &BB : F)
1602       RemoveRedundantDbgInstrs(&BB);
1603 
1604   return Changed;
1605 }
1606 
1607 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
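///
/// Roughly: for every dbg.value in \p BB that refers to an existing PHI, and
/// for every newly inserted PHI that uses that existing PHI, a cloned
/// dbg.value referring to the new PHI is inserted into the new PHI's block.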
1608 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1609                                     SmallVectorImpl<PHINode *> &InsertedPHIs) {
1610   assert(BB && "No BasicBlock to clone dbg.value(s) from.");
1611   if (InsertedPHIs.size() == 0)
1612     return;
1613 
1614   // Map existing PHI nodes to their dbg.values.
1615   ValueToValueMapTy DbgValueMap;
1616   for (auto &I : *BB) {
1617     if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1618       for (Value *V : DbgII->location_ops())
1619         if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1620           DbgValueMap.insert({Loc, DbgII});
1621     }
1622   }
1623   if (DbgValueMap.size() == 0)
1624     return;
1625 
1626   // Map a pair of the destination BB and old dbg.value to the new dbg.value,
1627   // so that if a dbg.value is being rewritten to use more than one of the
1628   // inserted PHIs in the same destination BB, we can update the same dbg.value
1629   // with all the new PHIs instead of creating one copy for each.
1630   MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
1631             DbgVariableIntrinsic *>
1632       NewDbgValueMap;
1633   // Then iterate through the new PHIs and look to see if they use one of the
1634   // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
1635   // propagate the info through the new PHI. If we use more than one new PHI in
1636   // a single destination BB with the same old dbg.value, merge the updates so
1637   // that we get a single new dbg.value with all the new PHIs.
1638   for (auto PHI : InsertedPHIs) {
1639     BasicBlock *Parent = PHI->getParent();
1640     // Avoid inserting an intrinsic into an EH block.
1641     if (Parent->getFirstNonPHI()->isEHPad())
1642       continue;
1643     for (auto VI : PHI->operand_values()) {
1644       auto V = DbgValueMap.find(VI);
1645       if (V != DbgValueMap.end()) {
1646         auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1647         auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1648         if (NewDI == NewDbgValueMap.end()) {
1649           auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
1650           NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1651         }
1652         DbgVariableIntrinsic *NewDbgII = NewDI->second;
1653         // If PHI contains VI as an operand more than once, we may have
1654         // already replaced it in NewDbgII; confirm that it is still present.
1655         if (is_contained(NewDbgII->location_ops(), VI))
1656           NewDbgII->replaceVariableLocationOp(VI, PHI);
1657       }
1658     }
1659   }
1660   // Insert the new dbg.values into their destination blocks.
1661   for (auto DI : NewDbgValueMap) {
1662     BasicBlock *Parent = DI.first.first;
1663     auto *NewDbgII = DI.second;
1664     auto InsertionPt = Parent->getFirstInsertionPt();
1665     assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1666     NewDbgII->insertBefore(&*InsertionPt);
1667   }
1668 }
1669 
1670 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1671                              DIBuilder &Builder, uint8_t DIExprFlags,
1672                              int Offset) {
1673   auto DbgAddrs = FindDbgAddrUses(Address);
1674   for (DbgVariableIntrinsic *DII : DbgAddrs) {
1675     const DebugLoc &Loc = DII->getDebugLoc();
1676     auto *DIVar = DII->getVariable();
1677     auto *DIExpr = DII->getExpression();
1678     assert(DIVar && "Missing variable");
1679     DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1680     // Insert llvm.dbg.declare immediately before DII, and remove old
1681     // llvm.dbg.declare.
1682     Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
1683     DII->eraseFromParent();
1684   }
1685   return !DbgAddrs.empty();
1686 }
1687 
1688 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1689                                         DIBuilder &Builder, int Offset) {
1690   const DebugLoc &Loc = DVI->getDebugLoc();
1691   auto *DIVar = DVI->getVariable();
1692   auto *DIExpr = DVI->getExpression();
1693   assert(DIVar && "Missing variable");
1694 
1695   // This is an alloca-based llvm.dbg.value. The first thing it should do with
1696   // the alloca pointer is dereference it. Otherwise we don't know how to handle
1697   // it and give up.
1698   if (!DIExpr || DIExpr->getNumElements() < 1 ||
1699       DIExpr->getElement(0) != dwarf::DW_OP_deref)
1700     return;
1701 
1702   // Insert the offset before the first deref.
1703   // We could just change the offset argument of dbg.value, but it's unsigned...
1704   if (Offset)
1705     DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1706 
1707   Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1708   DVI->eraseFromParent();
1709 }
1710 
1711 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1712                                     DIBuilder &Builder, int Offset) {
1713   if (auto *L = LocalAsMetadata::getIfExists(AI))
1714     if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1715       for (Use &U : llvm::make_early_inc_range(MDV->uses()))
1716         if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1717           replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1718 }
1719 
1720 /// Salvage debug information for instruction \p I where possible. If that is
1721 /// not possible, the debug uses of \p I are marked undef instead.
1722 void llvm::salvageDebugInfo(Instruction &I) {
1723   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1724   findDbgUsers(DbgUsers, &I);
1725   salvageDebugInfoForDbgValues(I, DbgUsers);
1726 }
1727 
1728 void llvm::salvageDebugInfoForDbgValues(
1729     Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
1730   bool Salvaged = false;
1731 
1732   for (auto *DII : DbgUsers) {
1733     // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1734     // are implicitly pointing out the value as a DWARF memory location
1735     // description.
1736     bool StackValue = isa<DbgValueInst>(DII);
1737     auto DIILocation = DII->location_ops();
1738     assert(
1739         is_contained(DIILocation, &I) &&
1740         "DbgVariableIntrinsic must use salvaged instruction as its location");
1741     unsigned LocNo = std::distance(DIILocation.begin(), find(DIILocation, &I));
1742 
1743     DIExpression *DIExpr =
1744         salvageDebugInfoImpl(I, DII->getExpression(), StackValue, LocNo);
1745 
1746     // salvageDebugInfoImpl should fail while examining the first element of
1747     // DbgUsers, or not fail at all.
1748     if (!DIExpr)
1749       break;
1750 
1751     DII->replaceVariableLocationOp(&I, I.getOperand(0));
1752     DII->setExpression(DIExpr);
1753     LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1754     Salvaged = true;
1755   }
1756 
1757   if (Salvaged)
1758     return;
1759 
1760   for (auto *DII : DbgUsers) {
1761     Value *Undef = UndefValue::get(I.getType());
1762     DII->replaceVariableLocationOp(&I, Undef);
1763   }
1764 }
1765 
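/// Try to express a GEP with a constant offset as DIExpression operations,
/// appending them to \p Opcodes. Returns false if the offset is not a known
/// constant.
///
/// An illustrative example: a GEP that adds a constant byte offset of 8 is
/// encoded as `DW_OP_plus_uconst 8` in the expression.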
1766 bool getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
1767                          SmallVectorImpl<uint64_t> &Opcodes) {
1768   unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
1769   // Rewrite a constant GEP into a DIExpression.
1770   APInt ConstantOffset(BitWidth, 0);
1771   if (!GEP->accumulateConstantOffset(DL, ConstantOffset))
1772     return false;
1773   DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
1774   return true;
1775 }
1776 
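/// Map an LLVM binary opcode to the corresponding DWARF expression operator,
/// or return 0 if no suitable mapping is known.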
1777 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
1778   switch (Opcode) {
1779   case Instruction::Add:
1780     return dwarf::DW_OP_plus;
1781   case Instruction::Sub:
1782     return dwarf::DW_OP_minus;
1783   case Instruction::Mul:
1784     return dwarf::DW_OP_mul;
1785   case Instruction::SDiv:
1786     return dwarf::DW_OP_div;
1787   case Instruction::SRem:
1788     return dwarf::DW_OP_mod;
1789   case Instruction::Or:
1790     return dwarf::DW_OP_or;
1791   case Instruction::And:
1792     return dwarf::DW_OP_and;
1793   case Instruction::Xor:
1794     return dwarf::DW_OP_xor;
1795   case Instruction::Shl:
1796     return dwarf::DW_OP_shl;
1797   case Instruction::LShr:
1798     return dwarf::DW_OP_shr;
1799   case Instruction::AShr:
1800     return dwarf::DW_OP_shra;
1801   default:
1802     // TODO: Salvage from each kind of binop we know about.
1803     return 0;
1804   }
1805 }
1806 
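/// Try to express a binary operation with a constant integer second operand
/// as DIExpression operations, appending them to \p Opcodes.
///
/// Illustrative examples: `add i64 %x, 16` becomes a constant offset of 16,
/// while `mul i64 %x, 4` becomes `DW_OP_constu 4, DW_OP_mul`. Operations with
/// no DWARF equivalent cause the function to return false.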
1807 bool getSalvageOpsForBinOp(BinaryOperator *BI,
1808                            SmallVectorImpl<uint64_t> &Opcodes) {
1809   // Rewrite binary operations with constant integer operands.
1810   auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
1811   if (!ConstInt || ConstInt->getBitWidth() > 64)
1812     return false;
1813   uint64_t Val = ConstInt->getSExtValue();
1814   Instruction::BinaryOps BinOpcode = BI->getOpcode();
1815   // Add or Sub Instructions with a constant operand can potentially be
1816   // simplified.
1817   if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
1818     uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
1819     DIExpression::appendOffset(Opcodes, Offset);
1820     return true;
1821   }
1822   // Add constant int operand to expression stack.
1823   Opcodes.append({dwarf::DW_OP_constu, Val});
1824 
1825   // Add salvaged binary operator to expression stack, if it has a valid
1826   // representation in a DIExpression.
1827   uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
1828   if (!DwarfBinOp)
1829     return false;
1830   Opcodes.push_back(DwarfBinOp);
1831 
1832   return true;
1833 }
1834 
1835 DIExpression *llvm::salvageDebugInfoImpl(Instruction &I,
1836                                          DIExpression *SrcDIExpr,
1837                                          bool WithStackValue, unsigned LocNo) {
1838   auto &M = *I.getModule();
1839   auto &DL = M.getDataLayout();
1840 
1841   // Apply a vector of opcodes to the source DIExpression.
1842   auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * {
1843     DIExpression *DIExpr = SrcDIExpr;
1844     if (!Ops.empty()) {
1845       DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, LocNo, WithStackValue);
1846     }
1847     return DIExpr;
1848   };
1849 
1850   // initializer-list helper for applying operators to the source DIExpression.
1851   auto applyOps = [&](ArrayRef<uint64_t> Opcodes) -> DIExpression * {
1852     SmallVector<uint64_t, 8> Ops(Opcodes.begin(), Opcodes.end());
1853     return doSalvage(Ops);
1854   };
1855 
1856   if (auto *CI = dyn_cast<CastInst>(&I)) {
1857     // No-op casts are irrelevant for debug info.
1858     if (CI->isNoopCast(DL))
1859       return SrcDIExpr;
1860 
1861     Type *Type = CI->getType();
1862     // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
1863     if (Type->isVectorTy() ||
1864         !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I)))
1865       return nullptr;
1866 
1867     Value *FromValue = CI->getOperand(0);
1868     unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits();
1869     unsigned ToTypeBitSize = Type->getScalarSizeInBits();
1870 
1871     return applyOps(DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
1872                                             isa<SExtInst>(&I)));
1873   }
1874 
1875   SmallVector<uint64_t, 8> Ops;
1876   if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1877     if (getSalvageOpsForGEP(GEP, DL, Ops))
1878       return doSalvage(Ops);
1879   } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1880     if (getSalvageOpsForBinOp(BI, Ops))
1881       return doSalvage(Ops);
1882   }
1883   // *Not* to do: we should not attempt to salvage load instructions,
1884   // because the validity and lifetime of a dbg.value containing
1885   // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
1886   return nullptr;
1887 }
1888 
1889 /// A replacement for a dbg.value expression.
1890 using DbgValReplacement = Optional<DIExpression *>;
1891 
1892 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1893 /// possibly moving/undefing users to prevent use-before-def. Returns true if
1894 /// changes are made.
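///
/// Roughly speaking, each debug user of \p From is either rewritten to use
/// \p To, moved to just after \p DomPoint when that preserves the original
/// ordering, or salvaged/undef'd when it cannot be dominated by \p To.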
1895 static bool rewriteDebugUsers(
1896     Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1897     function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
1898   // Find debug users of From.
1899   SmallVector<DbgVariableIntrinsic *, 1> Users;
1900   findDbgUsers(Users, &From);
1901   if (Users.empty())
1902     return false;
1903 
1904   // Prevent use-before-def of To.
1905   bool Changed = false;
1906   SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
1907   if (isa<Instruction>(&To)) {
1908     bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1909 
1910     for (auto *DII : Users) {
1911       // It's common to see a debug user between From and DomPoint. Move it
1912       // after DomPoint to preserve the variable update without any reordering.
1913       if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1914         LLVM_DEBUG(dbgs() << "MOVE:  " << *DII << '\n');
1915         DII->moveAfter(&DomPoint);
1916         Changed = true;
1917 
1918       // Users which otherwise aren't dominated by the replacement value must
1919       // be salvaged or deleted.
1920       } else if (!DT.dominates(&DomPoint, DII)) {
1921         UndefOrSalvage.insert(DII);
1922       }
1923     }
1924   }
1925 
1926   // Update debug users without use-before-def risk.
1927   for (auto *DII : Users) {
1928     if (UndefOrSalvage.count(DII))
1929       continue;
1930 
1931     DbgValReplacement DVR = RewriteExpr(*DII);
1932     if (!DVR)
1933       continue;
1934 
1935     DII->replaceVariableLocationOp(&From, &To);
1936     DII->setExpression(*DVR);
1937     LLVM_DEBUG(dbgs() << "REWRITE:  " << *DII << '\n');
1938     Changed = true;
1939   }
1940 
1941   if (!UndefOrSalvage.empty()) {
1942     // Try to salvage the remaining debug users.
1943     salvageDebugInfo(From);
1944     Changed = true;
1945   }
1946 
1947   return Changed;
1948 }
1949 
1950 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
1951 /// losslessly preserve the bits and semantics of the value. This predicate is
1952 /// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
1953 ///
1954 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
1955 /// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
1956 /// and also does not allow lossless pointer <-> integer conversions.
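///
/// For example, a bitcast between i64 and a same-sized pointer in an integral
/// address space is considered semantics-preserving, while a conversion such
/// as i64 <-> double is not handled here and conservatively returns false.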
1957 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
1958                                          Type *ToTy) {
1959   // Trivially compatible types.
1960   if (FromTy == ToTy)
1961     return true;
1962 
1963   // Handle compatible pointer <-> integer conversions.
1964   if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
1965     bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
1966     bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
1967                               !DL.isNonIntegralPointerType(ToTy);
1968     return SameSize && LosslessConversion;
1969   }
1970 
1971   // TODO: This is not exhaustive.
1972   return false;
1973 }
1974 
1975 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
1976                                  Instruction &DomPoint, DominatorTree &DT) {
1977   // Exit early if From has no debug users.
1978   if (!From.isUsedByMetadata())
1979     return false;
1980 
1981   assert(&From != &To && "Can't replace something with itself");
1982 
1983   Type *FromTy = From.getType();
1984   Type *ToTy = To.getType();
1985 
1986   auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
1987     return DII.getExpression();
1988   };
1989 
1990   // Handle no-op conversions.
1991   Module &M = *From.getModule();
1992   const DataLayout &DL = M.getDataLayout();
1993   if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
1994     return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1995 
1996   // Handle integer-to-integer widening and narrowing.
1997   // FIXME: Use DW_OP_convert when it's available everywhere.
1998   if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
1999     uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2000     uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2001     assert(FromBits != ToBits && "Unexpected no-op conversion");
2002 
2003     // When the width of the result grows, assume that a debugger will only
2004     // access the low `FromBits` bits when inspecting the source variable.
2005     if (FromBits < ToBits)
2006       return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2007 
2008     // The width of the result has shrunk. Use sign/zero extension to describe
2009     // the source variable's high bits.
2010     auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2011       DILocalVariable *Var = DII.getVariable();
2012 
2013       // Without knowing signedness, sign/zero extension isn't possible.
2014       auto Signedness = Var->getSignedness();
2015       if (!Signedness)
2016         return None;
2017 
2018       bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2019       return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2020                                      Signed);
2021     };
2022     return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
2023   }
2024 
2025   // TODO: Floating-point conversions, vectors.
2026   return false;
2027 }
2028 
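/// Remove from \p BB every instruction other than its terminator, EH pads and
/// token-producing instructions, replacing remaining uses of the removed
/// instructions with undef. Returns the number of non-debug instructions and
/// debug intrinsics removed, respectively.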
2029 std::pair<unsigned, unsigned>
2030 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2031   unsigned NumDeadInst = 0;
2032   unsigned NumDeadDbgInst = 0;
2033   // Delete the instructions backwards; this reduces the likelihood of having
2034   // to update as many def-use and use-def chains.
2035   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2036   while (EndInst != &BB->front()) {
2037     // Delete the next to last instruction.
2038     Instruction *Inst = &*--EndInst->getIterator();
2039     if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2040       Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
2041     if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2042       EndInst = Inst;
2043       continue;
2044     }
2045     if (isa<DbgInfoIntrinsic>(Inst))
2046       ++NumDeadDbgInst;
2047     else
2048       ++NumDeadInst;
2049     Inst->eraseFromParent();
2050   }
2051   return {NumDeadInst, NumDeadDbgInst};
2052 }
2053 
2054 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
2055                                    bool PreserveLCSSA, DomTreeUpdater *DTU,
2056                                    MemorySSAUpdater *MSSAU) {
2057   BasicBlock *BB = I->getParent();
2058 
2059   if (MSSAU)
2060     MSSAU->changeToUnreachable(I);
2061 
2062   SmallSet<BasicBlock *, 8> UniqueSuccessors;
2063 
2064   // Loop over all of the successors, removing BB's entry from any PHI
2065   // nodes.
2066   for (BasicBlock *Successor : successors(BB)) {
2067     Successor->removePredecessor(BB, PreserveLCSSA);
2068     if (DTU)
2069       UniqueSuccessors.insert(Successor);
2070   }
2071   // Insert a call to llvm.trap right before this.  This turns the undefined
2072   // behavior into a hard fail instead of falling through into random code.
2073   if (UseLLVMTrap) {
2074     Function *TrapFn =
2075       Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
2076     CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
2077     CallTrap->setDebugLoc(I->getDebugLoc());
2078   }
2079   auto *UI = new UnreachableInst(I->getContext(), I);
2080   UI->setDebugLoc(I->getDebugLoc());
2081 
2082   // All instructions after this are dead.
2083   unsigned NumInstrsRemoved = 0;
2084   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2085   while (BBI != BBE) {
2086     if (!BBI->use_empty())
2087       BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
2088     BB->getInstList().erase(BBI++);
2089     ++NumInstrsRemoved;
2090   }
2091   if (DTU) {
2092     SmallVector<DominatorTree::UpdateType, 8> Updates;
2093     Updates.reserve(UniqueSuccessors.size());
2094     for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2095       Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2096     DTU->applyUpdates(Updates);
2097   }
2098   return NumInstrsRemoved;
2099 }
2100 
2101 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2102   SmallVector<Value *, 8> Args(II->args());
2103   SmallVector<OperandBundleDef, 1> OpBundles;
2104   II->getOperandBundlesAsDefs(OpBundles);
2105   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2106                                        II->getCalledOperand(), Args, OpBundles);
2107   NewCall->setCallingConv(II->getCallingConv());
2108   NewCall->setAttributes(II->getAttributes());
2109   NewCall->setDebugLoc(II->getDebugLoc());
2110   NewCall->copyMetadata(*II);
2111 
2112   // If the invoke had profile metadata, try converting them for CallInst.
2113   uint64_t TotalWeight;
2114   if (NewCall->extractProfTotalWeight(TotalWeight)) {
2115     // Set the total weight if it fits into i32, otherwise reset.
2116     MDBuilder MDB(NewCall->getContext());
2117     auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2118                           ? nullptr
2119                           : MDB.createBranchWeights({uint32_t(TotalWeight)});
2120     NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2121   }
2122 
2123   return NewCall;
2124 }
2125 
2126 /// changeToCall - Convert the specified invoke into a normal call.
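///
/// An illustrative sketch (labels are arbitrary):
/// \code
///   invoke void @f() to label %normal unwind label %lpad
///   ; becomes
///   call void @f()
///   br label %normal
/// \endcode
/// The unwind destination loses this block as a predecessor.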
2127 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2128   CallInst *NewCall = createCallMatchingInvoke(II);
2129   NewCall->takeName(II);
2130   NewCall->insertBefore(II);
2131   II->replaceAllUsesWith(NewCall);
2132 
2133   // Follow the call by a branch to the normal destination.
2134   BasicBlock *NormalDestBB = II->getNormalDest();
2135   BranchInst::Create(NormalDestBB, II);
2136 
2137   // Update PHI nodes in the unwind destination
2138   BasicBlock *BB = II->getParent();
2139   BasicBlock *UnwindDestBB = II->getUnwindDest();
2140   UnwindDestBB->removePredecessor(BB);
2141   II->eraseFromParent();
2142   if (DTU)
2143     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2144 }
2145 
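/// Convert the call \p CI into an invoke. The containing block is split at the
/// call, the call is replaced by an invoke that branches to the split-off
/// block on the normal path and to \p UnwindEdge on the unwind path, and the
/// split-off block (beginning with the instruction that followed \p CI) is
/// returned.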
2146 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2147                                                    BasicBlock *UnwindEdge,
2148                                                    DomTreeUpdater *DTU) {
2149   BasicBlock *BB = CI->getParent();
2150 
2151   // Convert this function call into an invoke instruction.  First, split the
2152   // basic block.
2153   BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2154                                  CI->getName() + ".noexc");
2155 
2156   // Delete the unconditional branch inserted by SplitBlock
2157   BB->getInstList().pop_back();
2158 
2159   // Create the new invoke instruction.
2160   SmallVector<Value *, 8> InvokeArgs(CI->args());
2161   SmallVector<OperandBundleDef, 1> OpBundles;
2162 
2163   CI->getOperandBundlesAsDefs(OpBundles);
2164 
2165   // Note: we're round tripping operand bundles through memory here, and that
2166   // can potentially be avoided with a cleverer API design that we do not have
2167   // as of this time.
2168 
2169   InvokeInst *II =
2170       InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2171                          UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2172   II->setDebugLoc(CI->getDebugLoc());
2173   II->setCallingConv(CI->getCallingConv());
2174   II->setAttributes(CI->getAttributes());
2175 
2176   if (DTU)
2177     DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2178 
2179   // Make sure that anything using the call now uses the invoke!  This also
2180   // updates the CallGraph if present, because it uses a WeakTrackingVH.
2181   CI->replaceAllUsesWith(II);
2182 
2183   // Delete the original call
2184   Split->getInstList().pop_front();
2185   return Split;
2186 }
2187 
2188 static bool markAliveBlocks(Function &F,
2189                             SmallPtrSetImpl<BasicBlock *> &Reachable,
2190                             DomTreeUpdater *DTU = nullptr) {
2191   SmallVector<BasicBlock*, 128> Worklist;
2192   BasicBlock *BB = &F.front();
2193   Worklist.push_back(BB);
2194   Reachable.insert(BB);
2195   bool Changed = false;
2196   do {
2197     BB = Worklist.pop_back_val();
2198 
2199     // Do a quick scan of the basic block, turning any obviously unreachable
2200     // instructions into LLVM unreachable insts.  The instruction combining pass
2201     // canonicalizes unreachable insts into stores to null or undef.
2202     for (Instruction &I : *BB) {
2203       if (auto *CI = dyn_cast<CallInst>(&I)) {
2204         Value *Callee = CI->getCalledOperand();
2205         // Handle intrinsic calls.
2206         if (Function *F = dyn_cast<Function>(Callee)) {
2207           auto IntrinsicID = F->getIntrinsicID();
2208           // Assumptions that are known to be false are equivalent to
2209           // unreachable. Also, if the condition is undefined, then we make the
2210           // choice most beneficial to the optimizer, and choose that to also be
2211           // unreachable.
2212           if (IntrinsicID == Intrinsic::assume) {
2213             if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2214               // Don't insert a call to llvm.trap right before the unreachable.
2215               changeToUnreachable(CI, false, false, DTU);
2216               Changed = true;
2217               break;
2218             }
2219           } else if (IntrinsicID == Intrinsic::experimental_guard) {
2220             // A call to the guard intrinsic bails out of the current
2221             // compilation unit if the predicate passed to it is false. If the
2222             // predicate is a constant false, then we know the guard will bail
2223             // out of the current compile unconditionally, so all code following
2224             // it is dead.
2225             //
2226             // Note: unlike in llvm.assume, it is not "obviously profitable" for
2227             // guards to treat `undef` as `false` since a guard on `undef` can
2228             // still be useful for widening.
2229             if (match(CI->getArgOperand(0), m_Zero()))
2230               if (!isa<UnreachableInst>(CI->getNextNode())) {
2231                 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false,
2232                                     false, DTU);
2233                 Changed = true;
2234                 break;
2235               }
2236           }
2237         } else if ((isa<ConstantPointerNull>(Callee) &&
2238                     !NullPointerIsDefined(CI->getFunction())) ||
2239                    isa<UndefValue>(Callee)) {
2240           changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU);
2241           Changed = true;
2242           break;
2243         }
2244         if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2245           // If we found a call to a no-return function, insert an unreachable
2246           // instruction after it.  Make sure there isn't *already* one there
2247           // though.
2248           if (!isa<UnreachableInst>(CI->getNextNode())) {
2249             // Don't insert a call to llvm.trap right before the unreachable.
2250             changeToUnreachable(CI->getNextNode(), false, false, DTU);
2251             Changed = true;
2252           }
2253           break;
2254         }
2255       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2256         // Store to undef and store to null are undefined and used to signal
2257         // that they should be changed to unreachable by passes that can't
2258         // modify the CFG.
2259 
2260         // Don't touch volatile stores.
2261         if (SI->isVolatile()) continue;
2262 
2263         Value *Ptr = SI->getOperand(1);
2264 
2265         if (isa<UndefValue>(Ptr) ||
2266             (isa<ConstantPointerNull>(Ptr) &&
2267              !NullPointerIsDefined(SI->getFunction(),
2268                                    SI->getPointerAddressSpace()))) {
2269           changeToUnreachable(SI, true, false, DTU);
2270           Changed = true;
2271           break;
2272         }
2273       }
2274     }
2275 
2276     Instruction *Terminator = BB->getTerminator();
2277     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2278       // Turn invokes that call 'nounwind' functions into ordinary calls.
2279       Value *Callee = II->getCalledOperand();
2280       if ((isa<ConstantPointerNull>(Callee) &&
2281            !NullPointerIsDefined(BB->getParent())) ||
2282           isa<UndefValue>(Callee)) {
2283         changeToUnreachable(II, true, false, DTU);
2284         Changed = true;
2285       } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2286         if (II->use_empty() && II->onlyReadsMemory()) {
2287           // Replace the invoke with a branch to its normal destination.
2288           BasicBlock *NormalDestBB = II->getNormalDest();
2289           BasicBlock *UnwindDestBB = II->getUnwindDest();
2290           BranchInst::Create(NormalDestBB, II);
2291           UnwindDestBB->removePredecessor(II->getParent());
2292           II->eraseFromParent();
2293           if (DTU)
2294             DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2295         } else
2296           changeToCall(II, DTU);
2297         Changed = true;
2298       }
2299     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2300       // Remove catchpads which cannot be reached.
2301       struct CatchPadDenseMapInfo {
2302         static CatchPadInst *getEmptyKey() {
2303           return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2304         }
2305 
2306         static CatchPadInst *getTombstoneKey() {
2307           return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2308         }
2309 
2310         static unsigned getHashValue(CatchPadInst *CatchPad) {
2311           return static_cast<unsigned>(hash_combine_range(
2312               CatchPad->value_op_begin(), CatchPad->value_op_end()));
2313         }
2314 
2315         static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2316           if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2317               RHS == getEmptyKey() || RHS == getTombstoneKey())
2318             return LHS == RHS;
2319           return LHS->isIdenticalTo(RHS);
2320         }
2321       };
2322 
2323       SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2324       // Set of unique CatchPads.
2325       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2326                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2327           HandlerSet;
2328       detail::DenseSetEmpty Empty;
2329       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2330                                              E = CatchSwitch->handler_end();
2331            I != E; ++I) {
2332         BasicBlock *HandlerBB = *I;
2333         if (DTU)
2334           ++NumPerSuccessorCases[HandlerBB];
2335         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2336         if (!HandlerSet.insert({CatchPad, Empty}).second) {
2337           if (DTU)
2338             --NumPerSuccessorCases[HandlerBB];
2339           CatchSwitch->removeHandler(I);
2340           --I;
2341           --E;
2342           Changed = true;
2343         }
2344       }
2345       if (DTU) {
2346         std::vector<DominatorTree::UpdateType> Updates;
2347         for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2348           if (I.second == 0)
2349             Updates.push_back({DominatorTree::Delete, BB, I.first});
2350         DTU->applyUpdates(Updates);
2351       }
2352     }
2353 
2354     Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2355     for (BasicBlock *Successor : successors(BB))
2356       if (Reachable.insert(Successor).second)
2357         Worklist.push_back(Successor);
2358   } while (!Worklist.empty());
2359   return Changed;
2360 }
2361 
2362 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2363   Instruction *TI = BB->getTerminator();
2364 
2365   if (auto *II = dyn_cast<InvokeInst>(TI)) {
2366     changeToCall(II, DTU);
2367     return;
2368   }
2369 
2370   Instruction *NewTI;
2371   BasicBlock *UnwindDest;
2372 
2373   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2374     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2375     UnwindDest = CRI->getUnwindDest();
2376   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2377     auto *NewCatchSwitch = CatchSwitchInst::Create(
2378         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2379         CatchSwitch->getName(), CatchSwitch);
2380     for (BasicBlock *PadBB : CatchSwitch->handlers())
2381       NewCatchSwitch->addHandler(PadBB);
2382 
2383     NewTI = NewCatchSwitch;
2384     UnwindDest = CatchSwitch->getUnwindDest();
2385   } else {
2386     llvm_unreachable("Could not find unwind successor");
2387   }
2388 
2389   NewTI->takeName(TI);
2390   NewTI->setDebugLoc(TI->getDebugLoc());
2391   UnwindDest->removePredecessor(BB);
2392   TI->replaceAllUsesWith(NewTI);
2393   TI->eraseFromParent();
2394   if (DTU)
2395     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2396 }
2397 
2398 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
2399 /// if they are in a dead cycle.  Return true if a change was made, false
2400 /// otherwise.
2401 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2402                                    MemorySSAUpdater *MSSAU) {
2403   SmallPtrSet<BasicBlock *, 16> Reachable;
2404   bool Changed = markAliveBlocks(F, Reachable, DTU);
2405 
2406   // If there are unreachable blocks in the CFG...
2407   if (Reachable.size() == F.size())
2408     return Changed;
2409 
2410   assert(Reachable.size() < F.size());
2411 
2412   // Are there any blocks left to actually delete?
2413   SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2414   for (BasicBlock &BB : F) {
2415     // Skip reachable basic blocks
2416     if (Reachable.count(&BB))
2417       continue;
2418     // Skip already-deleted blocks
2419     if (DTU && DTU->isBBPendingDeletion(&BB))
2420       continue;
2421     BlocksToRemove.insert(&BB);
2422   }
2423 
2424   if (BlocksToRemove.empty())
2425     return Changed;
2426 
2427   Changed = true;
2428   NumRemoved += BlocksToRemove.size();
2429 
2430   if (MSSAU)
2431     MSSAU->removeBlocks(BlocksToRemove);
2432 
2433   // Loop over all of the basic blocks that are up for removal, dropping all of
2434   // their internal references. Update DTU if available.
2435   std::vector<DominatorTree::UpdateType> Updates;
2436   for (auto *BB : BlocksToRemove) {
2437     SmallSet<BasicBlock *, 8> UniqueSuccessors;
2438     for (BasicBlock *Successor : successors(BB)) {
2439       // Only remove references to BB in reachable successors of BB.
2440       if (Reachable.count(Successor))
2441         Successor->removePredecessor(BB);
2442       if (DTU)
2443         UniqueSuccessors.insert(Successor);
2444     }
2445     BB->dropAllReferences();
2446     if (DTU) {
2447       Instruction *TI = BB->getTerminator();
2448       assert(TI && "Basic block should have a terminator");
2449       // Terminators like invoke can have users. We have to replace their
2450       // users before removing them.
2451       if (!TI->use_empty())
2452         TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
2453       TI->eraseFromParent();
2454       new UnreachableInst(BB->getContext(), BB);
2455       assert(succ_empty(BB) && "The successor list of BB isn't empty before "
2456                                "applying corresponding DTU updates.");
2457       Updates.reserve(Updates.size() + UniqueSuccessors.size());
2458       for (auto *UniqueSuccessor : UniqueSuccessors)
2459         Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2460     }
2461   }
2462 
2463   if (DTU) {
2464     DTU->applyUpdates(Updates);
2465     for (auto *BB : BlocksToRemove)
2466       DTU->deleteBB(BB);
2467   } else {
2468     for (auto *BB : BlocksToRemove)
2469       BB->eraseFromParent();
2470   }
2471 
2472   return Changed;
2473 }
2474 
2475 void llvm::combineMetadata(Instruction *K, const Instruction *J,
2476                            ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
2477   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2478   K->dropUnknownNonDebugMetadata(KnownIDs);
2479   K->getAllMetadataOtherThanDebugLoc(Metadata);
2480   for (const auto &MD : Metadata) {
2481     unsigned Kind = MD.first;
2482     MDNode *JMD = J->getMetadata(Kind);
2483     MDNode *KMD = MD.second;
2484 
2485     switch (Kind) {
2486       default:
2487         K->setMetadata(Kind, nullptr); // Remove unknown metadata
2488         break;
2489       case LLVMContext::MD_dbg:
2490         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2491       case LLVMContext::MD_tbaa:
2492         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2493         break;
2494       case LLVMContext::MD_alias_scope:
2495         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2496         break;
2497       case LLVMContext::MD_noalias:
2498       case LLVMContext::MD_mem_parallel_loop_access:
2499         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2500         break;
2501       case LLVMContext::MD_access_group:
2502         K->setMetadata(LLVMContext::MD_access_group,
2503                        intersectAccessGroups(K, J));
2504         break;
2505       case LLVMContext::MD_range:
2506 
2507         // If K does move, use most generic range. Otherwise keep the range of
2508         // K.
2509         if (DoesKMove)
2510           // FIXME: If K does move, we should drop the range info and nonnull.
2511           //        Currently this function is used with DoesKMove in passes
2512           //        doing hoisting/sinking and the current behavior of using the
2513           //        most generic range is correct in those cases.
2514           K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2515         break;
2516       case LLVMContext::MD_fpmath:
2517         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2518         break;
2519       case LLVMContext::MD_invariant_load:
2520         // Only set the !invariant.load if it is present in both instructions.
2521         K->setMetadata(Kind, JMD);
2522         break;
2523       case LLVMContext::MD_nonnull:
2524         // If K does move, keep nonnull if it is present in both instructions.
2525         if (DoesKMove)
2526           K->setMetadata(Kind, JMD);
2527         break;
2528       case LLVMContext::MD_invariant_group:
2529         // Preserve !invariant.group in K.
2530         break;
2531       case LLVMContext::MD_align:
2532         K->setMetadata(Kind,
2533           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2534         break;
2535       case LLVMContext::MD_dereferenceable:
2536       case LLVMContext::MD_dereferenceable_or_null:
2537         K->setMetadata(Kind,
2538           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2539         break;
2540       case LLVMContext::MD_preserve_access_index:
2541         // Preserve !preserve.access.index in K.
2542         break;
2543     }
2544   }
2545   // Set !invariant.group from J if J has it. If both instructions have it
2546   // then we will just pick it from J - even when they are different.
2547   // Also make sure that K is a load or store - e.g. combining a bitcast with a
2548   // load could produce a bitcast with invariant.group metadata, which is invalid.
2549   // FIXME: we should try to preserve both invariant.group md if they are
2550   // different, but right now instruction can only have one invariant.group.
2551   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2552     if (isa<LoadInst>(K) || isa<StoreInst>(K))
2553       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2554 }
2555 
2556 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2557                                  bool KDominatesJ) {
2558   unsigned KnownIDs[] = {
2559       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2560       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2561       LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
2562       LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2563       LLVMContext::MD_dereferenceable,
2564       LLVMContext::MD_dereferenceable_or_null,
2565       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2566   combineMetadata(K, J, KnownIDs, KDominatesJ);
2567 }
2568 
2569 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2570   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2571   Source.getAllMetadata(MD);
2572   MDBuilder MDB(Dest.getContext());
2573   Type *NewType = Dest.getType();
2574   const DataLayout &DL = Source.getModule()->getDataLayout();
2575   for (const auto &MDPair : MD) {
2576     unsigned ID = MDPair.first;
2577     MDNode *N = MDPair.second;
2578     // Note, essentially every kind of metadata should be preserved here! This
2579     // routine is supposed to clone a load instruction changing *only its type*.
2580     // The only metadata it makes sense to drop is metadata which is invalidated
2581     // when the pointer type changes. This should essentially never be the case
2582     // in LLVM, but we explicitly switch over only known metadata to be
2583     // conservatively correct. If you are adding metadata to LLVM which pertains
2584     // to loads, you almost certainly want to add it here.
2585     switch (ID) {
2586     case LLVMContext::MD_dbg:
2587     case LLVMContext::MD_tbaa:
2588     case LLVMContext::MD_prof:
2589     case LLVMContext::MD_fpmath:
2590     case LLVMContext::MD_tbaa_struct:
2591     case LLVMContext::MD_invariant_load:
2592     case LLVMContext::MD_alias_scope:
2593     case LLVMContext::MD_noalias:
2594     case LLVMContext::MD_nontemporal:
2595     case LLVMContext::MD_mem_parallel_loop_access:
2596     case LLVMContext::MD_access_group:
2597       // All of these directly apply.
2598       Dest.setMetadata(ID, N);
2599       break;
2600 
2601     case LLVMContext::MD_nonnull:
2602       copyNonnullMetadata(Source, N, Dest);
2603       break;
2604 
2605     case LLVMContext::MD_align:
2606     case LLVMContext::MD_dereferenceable:
2607     case LLVMContext::MD_dereferenceable_or_null:
2608       // These only directly apply if the new type is also a pointer.
2609       if (NewType->isPointerTy())
2610         Dest.setMetadata(ID, N);
2611       break;
2612 
2613     case LLVMContext::MD_range:
2614       copyRangeMetadata(DL, Source, N, Dest);
2615       break;
2616     }
2617   }
2618 }
2619 
2620 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2621   auto *ReplInst = dyn_cast<Instruction>(Repl);
2622   if (!ReplInst)
2623     return;
2624 
2625   // Patch the replacement so that it is not more restrictive than the value
2626   // being replaced.
2627   // Note that if 'I' is a load being replaced by some operation,
2628   // for example, by an arithmetic operation, then andIRFlags()
2629   // would just erase all math flags from the original arithmetic
2630   // operation, which is clearly not wanted and not needed.
2631   if (!isa<LoadInst>(I))
2632     ReplInst->andIRFlags(I);
2633 
2634   // FIXME: If both the original and replacement value are part of the
2635   // same control-flow region (meaning that the execution of one
2636   // guarantees the execution of the other), then we can combine the
2637   // noalias scopes here and do better than the general conservative
2638   // answer used in combineMetadata().
2639 
2640   // In general, GVN unifies expressions over different control-flow
2641   // regions, and so we need a conservative combination of the noalias
2642   // scopes.
2643   static const unsigned KnownIDs[] = {
2644       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2645       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2646       LLVMContext::MD_fpmath,          LLVMContext::MD_invariant_load,
2647       LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
2648       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2649   combineMetadata(ReplInst, I, KnownIDs, false);
2650 }
2651 
2652 template <typename RootType, typename DominatesFn>
2653 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2654                                          const RootType &Root,
2655                                          const DominatesFn &Dominates) {
2656   assert(From->getType() == To->getType());
2657 
2658   unsigned Count = 0;
2659   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2660        UI != UE;) {
2661     Use &U = *UI++;
2662     if (!Dominates(Root, U))
2663       continue;
2664     U.set(To);
2665     LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
2666                       << "' as " << *To << " in " << *U << "\n");
2667     ++Count;
2668   }
2669   return Count;
2670 }
2671 
2672 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
  assert(From->getType() == To->getType());
  auto *BB = From->getParent();
  unsigned Count = 0;
2676 
2677   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2678        UI != UE;) {
2679     Use &U = *UI++;
2680     auto *I = cast<Instruction>(U.getUser());
2681     if (I->getParent() == BB)
2682       continue;
2683     U.set(To);
2684     ++Count;
2685   }
2686   return Count;
2687 }
2688 
2689 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2690                                         DominatorTree &DT,
2691                                         const BasicBlockEdge &Root) {
2692   auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2693     return DT.dominates(Root, U);
2694   };
2695   return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2696 }
2697 
2698 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2699                                         DominatorTree &DT,
2700                                         const BasicBlock *BB) {
2701   auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
2702     return DT.dominates(BB, U);
2703   };
2704   return ::replaceDominatedUsesWith(From, To, BB, Dominates);
2705 }
2706 
2707 bool llvm::callsGCLeafFunction(const CallBase *Call,
2708                                const TargetLibraryInfo &TLI) {
2709   // Check if the function is specifically marked as a gc leaf function.
2710   if (Call->hasFnAttr("gc-leaf-function"))
2711     return true;
2712   if (const Function *F = Call->getCalledFunction()) {
2713     if (F->hasFnAttribute("gc-leaf-function"))
2714       return true;
2715 
2716     if (auto IID = F->getIntrinsicID()) {
2717       // Most LLVM intrinsics do not take safepoints.
2718       return IID != Intrinsic::experimental_gc_statepoint &&
2719              IID != Intrinsic::experimental_deoptimize &&
2720              IID != Intrinsic::memcpy_element_unordered_atomic &&
2721              IID != Intrinsic::memmove_element_unordered_atomic;
2722     }
2723   }
2724 
2725   // Lib calls can be materialized by some passes, and won't be
2726   // marked as 'gc-leaf-function.' All available Libcalls are
2727   // GC-leaf.
2728   LibFunc LF;
2729   if (TLI.getLibFunc(*Call, LF)) {
2730     return TLI.has(LF);
2731   }
2732 
2733   return false;
2734 }
2735 
2736 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2737                                LoadInst &NewLI) {
2738   auto *NewTy = NewLI.getType();
2739 
2740   // This only directly applies if the new type is also a pointer.
2741   if (NewTy->isPointerTy()) {
2742     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2743     return;
2744   }
2745 
2746   // The only other translation we can do is to integral loads with !range
2747   // metadata.
2748   if (!NewTy->isIntegerTy())
2749     return;
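
  // As an illustrative example: rewriting a pointer load carrying !nonnull as
  // a load of i64 yields !range !{i64 1, i64 0}, the wrapping range that
  // excludes only the null value.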
2750 
2751   MDBuilder MDB(NewLI.getContext());
2752   const Value *Ptr = OldLI.getPointerOperand();
2753   auto *ITy = cast<IntegerType>(NewTy);
2754   auto *NullInt = ConstantExpr::getPtrToInt(
2755       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2756   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2757   NewLI.setMetadata(LLVMContext::MD_range,
2758                     MDB.createRange(NonNullInt, NullInt));
2759 }
2760 
2761 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2762                              MDNode *N, LoadInst &NewLI) {
2763   auto *NewTy = NewLI.getType();
2764 
  // Give up unless the load is being converted to a pointer type, where there
  // is a single very valuable mapping we can do reliably: a !range that
  // excludes null becomes !nonnull.
2767   // FIXME: It would be nice to propagate this in more ways, but the type
2768   // conversions make it hard.
2769   if (!NewTy->isPointerTy())
2770     return;
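
  // As an illustrative example: an integer load carrying !range !{i64 1, i64 0}
  // (a range that excludes zero) that is rewritten as a pointer load receives
  // !nonnull below.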
2771 
2772   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
2773   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2774     MDNode *NN = MDNode::get(OldLI.getContext(), None);
2775     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2776   }
2777 }
2778 
2779 void llvm::dropDebugUsers(Instruction &I) {
2780   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2781   findDbgUsers(DbgUsers, &I);
2782   for (auto *DII : DbgUsers)
2783     DII->eraseFromParent();
2784 }
2785 
2786 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2787                                     BasicBlock *BB) {
  // Since we are moving the instructions out of their basic block, we do not
  // retain their original debug locations (DILocations) or their debug
  // intrinsic instructions.
2791   //
2792   // Doing so would degrade the debugging experience and adversely affect the
2793   // accuracy of profiling information.
2794   //
2795   // Currently, when hoisting the instructions, we take the following actions:
2796   // - Remove their debug intrinsic instructions.
2797   // - Set their debug locations to the values from the insertion point.
2798   //
2799   // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2800   // need to be deleted, is because there will not be any instructions with a
2801   // DILocation in either branch left after performing the transformation. We
2802   // can only insert a dbg.value after the two branches are joined again.
2803   //
2804   // See PR38762, PR39243 for more details.
2805   //
2806   // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2807   // encode predicated DIExpressions that yield different results on different
2808   // code paths.
2809 
2810   // A hoisted conditional probe should be treated as dangling so that it will
2811   // not be over-counted when the samples collected on the non-conditional path
2812   // are counted towards the conditional path. We leave it for the counts
  // inference algorithm to figure out a proper count for a dangling probe.
2814   moveAndDanglePseudoProbes(BB, InsertPt);
2815 
2816   for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2817     Instruction *I = &*II;
2818     I->dropUnknownNonDebugMetadata();
2819     if (I->isUsedByMetadata())
2820       dropDebugUsers(*I);
2821     if (isa<DbgInfoIntrinsic>(I)) {
2822       // Remove DbgInfo Intrinsics.
2823       II = I->eraseFromParent();
2824       continue;
2825     }
2826     I->setDebugLoc(InsertPt->getDebugLoc());
2827     ++II;
2828   }
2829   DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2830                                  BB->begin(),
2831                                  BB->getTerminator()->getIterator());
2832 }
2833 
2834 namespace {
2835 
2836 /// A potential constituent of a bitreverse or bswap expression. See
2837 /// collectBitParts for a fuller explanation.
2838 struct BitPart {
2839   BitPart(Value *P, unsigned BW) : Provider(P) {
2840     Provenance.resize(BW);
2841   }
2842 
2843   /// The Value that this is a bitreverse/bswap of.
2844   Value *Provider;
2845 
  /// The "provenance" of each bit. Provenance[A] = B means that bit A of the
  /// result of this expression came from bit B in Provider.
2848   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2849 
2850   enum { Unset = -1 };
2851 };
2852 
2853 } // end anonymous namespace
2854 
2855 /// Analyze the specified subexpression and see if it is capable of providing
2856 /// pieces of a bswap or bitreverse. The subexpression provides a potential
2857 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
2858 /// the output of the expression came from a corresponding bit in some other
2859 /// value. This function is recursive, and the end result is a mapping of
2860 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
2861 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2862 ///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
2864 /// that the expression deposits the low byte of %X into the high byte of the
2865 /// result and that all other bits are zero. This expression is accepted and a
2866 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2867 /// [0-7].
2868 ///
2869 /// For vector types, all analysis is performed at the per-element level. No
2870 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
2871 /// constant masks must be splatted across all elements.
2872 ///
2873 /// To avoid revisiting values, the BitPart results are memoized into the
2874 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
2875 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
/// store BitPart objects, not pointers. As we need the concept of a null
/// BitPart (the Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
2879 ///
2880 /// Because we pass around references into \c BPS, we must use a container that
2881 /// does not invalidate internal references (std::map instead of DenseMap).
2882 static const Optional<BitPart> &
2883 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2884                 std::map<Value *, Optional<BitPart>> &BPS, int Depth) {
2885   auto I = BPS.find(V);
2886   if (I != BPS.end())
2887     return I->second;
2888 
2889   auto &Result = BPS[V] = None;
2890   auto BitWidth = V->getType()->getScalarSizeInBits();
2891 
2892   // Can't do integer/elements > 128 bits.
2893   if (BitWidth > 128)
2894     return Result;
2895 
2896   // Prevent stack overflow by limiting the recursion depth
2897   if (Depth == BitPartRecursionMaxDepth) {
2898     LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
2899     return Result;
2900   }
2901 
2902   if (auto *I = dyn_cast<Instruction>(V)) {
2903     Value *X, *Y;
2904     const APInt *C;
2905 
2906     // If this is an or instruction, it may be an inner node of the bswap.
2907     if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
2908       const auto &A =
2909           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2910       const auto &B =
2911           collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2912       if (!A || !B)
2913         return Result;
2914 
2915       // Try and merge the two together.
2916       if (!A->Provider || A->Provider != B->Provider)
2917         return Result;
2918 
2919       Result = BitPart(A->Provider, BitWidth);
2920       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
2921         if (A->Provenance[BitIdx] != BitPart::Unset &&
2922             B->Provenance[BitIdx] != BitPart::Unset &&
2923             A->Provenance[BitIdx] != B->Provenance[BitIdx])
2924           return Result = None;
2925 
2926         if (A->Provenance[BitIdx] == BitPart::Unset)
2927           Result->Provenance[BitIdx] = B->Provenance[BitIdx];
2928         else
2929           Result->Provenance[BitIdx] = A->Provenance[BitIdx];
2930       }
2931 
2932       return Result;
2933     }
2934 
2935     // If this is a logical shift by a constant, recurse then shift the result.
2936     if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
2937       const APInt &BitShift = *C;
2938 
2939       // Ensure the shift amount is defined.
2940       if (BitShift.uge(BitWidth))
2941         return Result;
2942 
2943       const auto &Res =
2944           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2945       if (!Res)
2946         return Result;
2947       Result = Res;
2948 
2949       // Perform the "shift" on BitProvenance.
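      // For shl, the top BitShift provenance entries fall off the high end and
      // BitShift Unset entries (the shifted-in zero bits) are prepended at the
      // low end; lshr is the mirror image.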
2950       auto &P = Result->Provenance;
2951       if (I->getOpcode() == Instruction::Shl) {
2952         P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
2953         P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
2954       } else {
2955         P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
2956         P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
2957       }
2958 
2959       return Result;
2960     }
2961 
2962     // If this is a logical 'and' with a mask that clears bits, recurse then
2963     // unset the appropriate bits.
2964     if (match(V, m_And(m_Value(X), m_APInt(C)))) {
2965       const APInt &AndMask = *C;
2966 
2967       // Check that the mask allows a multiple of 8 bits for a bswap, for an
2968       // early exit.
2969       unsigned NumMaskedBits = AndMask.countPopulation();
2970       if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
2971         return Result;
2972 
2973       const auto &Res =
2974           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2975       if (!Res)
2976         return Result;
2977       Result = Res;
2978 
2979       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
2980         // If the AndMask is zero for this bit, clear the bit.
2981         if (AndMask[BitIdx] == 0)
2982           Result->Provenance[BitIdx] = BitPart::Unset;
2983       return Result;
2984     }
2985 
    // If this is a zext instruction, zero extend the result.
2987     if (match(V, m_ZExt(m_Value(X)))) {
2988       const auto &Res =
2989           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2990       if (!Res)
2991         return Result;
2992 
2993       Result = BitPart(Res->Provider, BitWidth);
2994       auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
2995       for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
2996         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
2997       for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
2998         Result->Provenance[BitIdx] = BitPart::Unset;
2999       return Result;
3000     }
3001 
3002     // If this is a truncate instruction, extract the lower bits.
3003     if (match(V, m_Trunc(m_Value(X)))) {
3004       const auto &Res =
3005           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3006       if (!Res)
3007         return Result;
3008 
3009       Result = BitPart(Res->Provider, BitWidth);
3010       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3011         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3012       return Result;
3013     }
3014 
    // BITREVERSE - most likely due to us previously matching a partial
    // bitreverse.
3017     if (match(V, m_BitReverse(m_Value(X)))) {
3018       const auto &Res =
3019           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3020       if (!Res)
3021         return Result;
3022 
3023       Result = BitPart(Res->Provider, BitWidth);
3024       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3025         Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3026       return Result;
3027     }
3028 
    // BSWAP - most likely due to us previously matching a partial bswap.
3030     if (match(V, m_BSwap(m_Value(X)))) {
3031       const auto &Res =
3032           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3033       if (!Res)
3034         return Result;
3035 
3036       unsigned ByteWidth = BitWidth / 8;
3037       Result = BitPart(Res->Provider, BitWidth);
3038       for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3039         unsigned ByteBitOfs = ByteIdx * 8;
3040         for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3041           Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3042               Res->Provenance[ByteBitOfs + BitIdx];
3043       }
3044       return Result;
3045     }
3046 
    // Funnel 'double' shifts take 3 operands: 2 inputs and the shift amount,
    // which is taken modulo the bit width.
3049     // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3050     // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
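    // As an illustrative example, fshl(i8 %X, i8 %X, 3) is a rotate left by 3:
    // result bits [3..7] come from %X bits [0..4] and result bits [0..2] come
    // from %X bits [5..7].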
3051     if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3052         match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3053       // We can treat fshr as a fshl by flipping the modulo amount.
3054       unsigned ModAmt = C->urem(BitWidth);
3055       if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3056         ModAmt = BitWidth - ModAmt;
3057 
3058       const auto &LHS =
3059           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3060       const auto &RHS =
3061           collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3062 
3063       // Check we have both sources and they are from the same provider.
3064       if (!LHS || !RHS || !LHS->Provider || LHS->Provider != RHS->Provider)
3065         return Result;
3066 
3067       unsigned StartBitRHS = BitWidth - ModAmt;
3068       Result = BitPart(LHS->Provider, BitWidth);
3069       for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3070         Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3071       for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3072         Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3073       return Result;
3074     }
3075   }
3076 
3077   // Okay, we got to something that isn't a shift, 'or' or 'and'.  This must be
3078   // the input value to the bswap/bitreverse.
3079   Result = BitPart(V, BitWidth);
3080   for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3081     Result->Provenance[BitIdx] = BitIdx;
3082   return Result;
3083 }
3084 
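// Check whether provider bit 'From' landing at result bit 'To' is consistent
// with a byte swap of a 'BitWidth'-bit value. As an illustrative example, for
// BitWidth = 32, From = 5 (bit 5 of byte 0) must map to To = 29 (bit 5 of
// byte 3): the position within the byte is preserved while the byte order is
// reversed.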
3085 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3086                                           unsigned BitWidth) {
3087   if (From % 8 != To % 8)
3088     return false;
3089   // Convert from bit indices to byte indices and check for a byte reversal.
3090   From >>= 3;
3091   To >>= 3;
3092   BitWidth >>= 3;
3093   return From == BitWidth - To - 1;
3094 }
3095 
3096 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3097                                                unsigned BitWidth) {
3098   return From == BitWidth - To - 1;
3099 }
3100 
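// As an illustrative sketch, a 16-bit byte-swap idiom such as
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %r  = or i16 %hi, %lo
// is collected by collectBitParts with provider %x and provenance
// [8..15, 0..7], which passes the bswap check below and is rewritten as a
// call to llvm.bswap.i16.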
3101 bool llvm::recognizeBSwapOrBitReverseIdiom(
3102     Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3103     SmallVectorImpl<Instruction *> &InsertedInsts) {
3104   if (!match(I, m_Or(m_Value(), m_Value())) &&
3105       !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3106       !match(I, m_FShr(m_Value(), m_Value(), m_Value())))
3107     return false;
3108   if (!MatchBSwaps && !MatchBitReversals)
3109     return false;
3110   Type *ITy = I->getType();
3111   if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3112     return false;  // Can't do integer/elements > 128 bits.
3113 
3114   Type *DemandedTy = ITy;
3115   if (I->hasOneUse())
3116     if (auto *Trunc = dyn_cast<TruncInst>(I->user_back()))
3117       DemandedTy = Trunc->getType();
3118 
3119   // Try to find all the pieces corresponding to the bswap.
3120   std::map<Value *, Optional<BitPart>> BPS;
3121   auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0);
3122   if (!Res)
3123     return false;
3124   ArrayRef<int8_t> BitProvenance = Res->Provenance;
3125   assert(all_of(BitProvenance,
3126                 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3127          "Illegal bit provenance index");
3128 
3129   // If the upper bits are zero, then attempt to perform as a truncated op.
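  // For example (illustrative), an i32 expression whose provenance covers only
  // bits 0..15 is handled as an i16 bswap/bitreverse and then zero-extended
  // back to i32 below.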
3130   if (BitProvenance.back() == BitPart::Unset) {
3131     while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3132       BitProvenance = BitProvenance.drop_back();
3133     if (BitProvenance.empty())
3134       return false; // TODO - handle null value?
3135     DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3136     if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3137       DemandedTy = VectorType::get(DemandedTy, IVecTy);
3138   }
3139 
3140   // Check BitProvenance hasn't found a source larger than the result type.
3141   unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3142   if (DemandedBW > ITy->getScalarSizeInBits())
3143     return false;
3144 
3145   // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3146   // only byteswap values with an even number of bytes.
3147   APInt DemandedMask = APInt::getAllOnesValue(DemandedBW);
3148   bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3149   bool OKForBitReverse = MatchBitReversals;
3150   for (unsigned BitIdx = 0;
3151        (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3152     if (BitProvenance[BitIdx] == BitPart::Unset) {
3153       DemandedMask.clearBit(BitIdx);
3154       continue;
3155     }
3156     OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3157                                                 DemandedBW);
3158     OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3159                                                           BitIdx, DemandedBW);
3160   }
3161 
3162   Intrinsic::ID Intrin;
3163   if (OKForBSwap)
3164     Intrin = Intrinsic::bswap;
3165   else if (OKForBitReverse)
3166     Intrin = Intrinsic::bitreverse;
3167   else
3168     return false;
3169 
3170   Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
3171   Value *Provider = Res->Provider;
3172 
3173   // We may need to truncate the provider.
3174   if (DemandedTy != Provider->getType()) {
3175     auto *Trunc =
3176         CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
3177     InsertedInsts.push_back(Trunc);
3178     Provider = Trunc;
3179   }
3180 
3181   Instruction *Result = CallInst::Create(F, Provider, "rev", I);
3182   InsertedInsts.push_back(Result);
3183 
3184   if (!DemandedMask.isAllOnesValue()) {
3185     auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3186     Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
3187     InsertedInsts.push_back(Result);
3188   }
3189 
3190   // We may need to zeroextend back to the result type.
3191   if (ITy != Result->getType()) {
3192     auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
3193     InsertedInsts.push_back(ExtInst);
3194   }
3195 
3196   return true;
3197 }
3198 
3199 // CodeGen has special handling for some string functions that may replace
3200 // them with target-specific intrinsics.  Since that'd skip our interceptors
3201 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3202 // we mark affected calls as NoBuiltin, which will disable optimization
3203 // in CodeGen.
3204 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3205     CallInst *CI, const TargetLibraryInfo *TLI) {
3206   Function *F = CI->getCalledFunction();
3207   LibFunc Func;
3208   if (F && !F->hasLocalLinkage() && F->hasName() &&
3209       TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3210       !F->doesNotAccessMemory())
3211     CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
3212 }
3213 
3214 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
3215   // We can't have a PHI with a metadata type.
3216   if (I->getOperand(OpIdx)->getType()->isMetadataTy())
3217     return false;
3218 
3219   // Early exit.
3220   if (!isa<Constant>(I->getOperand(OpIdx)))
3221     return true;
3222 
3223   switch (I->getOpcode()) {
3224   default:
3225     return true;
3226   case Instruction::Call:
3227   case Instruction::Invoke: {
3228     const auto &CB = cast<CallBase>(*I);
3229 
3230     // Can't handle inline asm. Skip it.
3231     if (CB.isInlineAsm())
3232       return false;
3233 
3234     // Constant bundle operands may need to retain their constant-ness for
3235     // correctness.
3236     if (CB.isBundleOperand(OpIdx))
3237       return false;
3238 
3239     if (OpIdx < CB.getNumArgOperands()) {
3240       // Some variadic intrinsics require constants in the variadic arguments,
3241       // which currently aren't markable as immarg.
3242       if (isa<IntrinsicInst>(CB) &&
3243           OpIdx >= CB.getFunctionType()->getNumParams()) {
3244         // This is known to be OK for stackmap.
3245         return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3246       }
3247 
3248       // gcroot is a special case, since it requires a constant argument which
3249       // isn't also required to be a simple ConstantInt.
3250       if (CB.getIntrinsicID() == Intrinsic::gcroot)
3251         return false;
3252 
3253       // Some intrinsic operands are required to be immediates.
3254       return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3255     }
3256 
    // It is never allowed to replace the callee operand of an intrinsic call,
    // but it may be possible for a normal call.
3259     return !isa<IntrinsicInst>(CB);
3260   }
3261   case Instruction::ShuffleVector:
3262     // Shufflevector masks are constant.
3263     return OpIdx != 2;
3264   case Instruction::Switch:
3265   case Instruction::ExtractValue:
3266     // All operands apart from the first are constant.
3267     return OpIdx == 0;
3268   case Instruction::InsertValue:
3269     // All operands apart from the first and the second are constant.
3270     return OpIdx < 2;
3271   case Instruction::Alloca:
3272     // Static allocas (constant size in the entry block) are handled by
3273     // prologue/epilogue insertion so they're free anyway. We definitely don't
3274     // want to make them non-constant.
3275     return !cast<AllocaInst>(I)->isStaticAlloca();
3276   case Instruction::GetElementPtr:
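    // Only indices that step into a struct must stay constant (e.g. the i32
    // index selecting a struct field); indices into arrays or vectors may
    // become variable.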
3277     if (OpIdx == 0)
3278       return true;
3279     gep_type_iterator It = gep_type_begin(I);
3280     for (auto E = std::next(It, OpIdx); It != E; ++It)
3281       if (It.isStruct())
3282         return false;
3283     return true;
3284   }
3285 }
3286 
3287 Value *llvm::invertCondition(Value *Condition) {
3288   // First: Check if it's a constant
3289   if (Constant *C = dyn_cast<Constant>(Condition))
3290     return ConstantExpr::getNot(C);
3291 
3292   // Second: If the condition is already inverted, return the original value
3293   Value *NotCondition;
3294   if (match(Condition, m_Not(m_Value(NotCondition))))
3295     return NotCondition;
3296 
3297   BasicBlock *Parent = nullptr;
3298   Instruction *Inst = dyn_cast<Instruction>(Condition);
3299   if (Inst)
3300     Parent = Inst->getParent();
3301   else if (Argument *Arg = dyn_cast<Argument>(Condition))
3302     Parent = &Arg->getParent()->getEntryBlock();
3303   assert(Parent && "Unsupported condition to invert");
3304 
3305   // Third: Check all the users for an invert
3306   for (User *U : Condition->users())
3307     if (Instruction *I = dyn_cast<Instruction>(U))
3308       if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
3309         return I;
3310 
3311   // Last option: Create a new instruction
3312   auto *Inverted =
3313       BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
3314   if (Inst && !isa<PHINode>(Inst))
3315     Inverted->insertAfter(Inst);
3316   else
3317     Inverted->insertBefore(&*Parent->getFirstInsertionPt());
3318   return Inverted;
3319 }
3320 
3321 bool llvm::inferAttributesFromOthers(Function &F) {
3322   // Note: We explicitly check for attributes rather than using cover functions
3323   // because some of the cover functions include the logic being implemented.
3324 
3325   bool Changed = false;
3326   // readnone + not convergent implies nosync
3327   if (!F.hasFnAttribute(Attribute::NoSync) &&
3328       F.doesNotAccessMemory() && !F.isConvergent()) {
3329     F.setNoSync();
3330     Changed = true;
3331   }
3332 
3333   // readonly implies nofree
3334   if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
3335     F.setDoesNotFreeMemory();
3336     Changed = true;
3337   }
3338 
3339   // willreturn implies mustprogress
3340   if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
3341     F.setMustProgress();
3342     Changed = true;
3343   }
3344 
3345   // TODO: There are a bunch of cases of restrictive memory effects we
3346   // can infer by inspecting arguments of argmemonly-ish functions.
3347 
3348   return Changed;
3349 }
3350