1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This family of functions perform various local transformations to the
10 // program.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SetVector.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/DomTreeUpdater.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/InstructionSimplify.h"
32 #include "llvm/Analysis/LazyValueInfo.h"
33 #include "llvm/Analysis/MemoryBuiltins.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/Analysis/VectorUtils.h"
38 #include "llvm/BinaryFormat/Dwarf.h"
39 #include "llvm/IR/Argument.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/BasicBlock.h"
42 #include "llvm/IR/CFG.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/ConstantRange.h"
45 #include "llvm/IR/Constants.h"
46 #include "llvm/IR/DIBuilder.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfoMetadata.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/DerivedTypes.h"
51 #include "llvm/IR/Dominators.h"
52 #include "llvm/IR/Function.h"
53 #include "llvm/IR/GetElementPtrTypeIterator.h"
54 #include "llvm/IR/GlobalObject.h"
55 #include "llvm/IR/IRBuilder.h"
56 #include "llvm/IR/InstrTypes.h"
57 #include "llvm/IR/Instruction.h"
58 #include "llvm/IR/Instructions.h"
59 #include "llvm/IR/IntrinsicInst.h"
60 #include "llvm/IR/Intrinsics.h"
61 #include "llvm/IR/LLVMContext.h"
62 #include "llvm/IR/MDBuilder.h"
63 #include "llvm/IR/Metadata.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/Operator.h"
66 #include "llvm/IR/PatternMatch.h"
67 #include "llvm/IR/PseudoProbe.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/KnownBits.h"
77 #include "llvm/Support/raw_ostream.h"
78 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
79 #include "llvm/Transforms/Utils/ValueMapper.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <climits>
83 #include <cstdint>
84 #include <iterator>
85 #include <map>
86 #include <utility>
87 
88 using namespace llvm;
89 using namespace llvm::PatternMatch;
90 
91 #define DEBUG_TYPE "local"
92 
93 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
94 STATISTIC(NumPHICSEs, "Number of PHI's that got CSE'd");
95 
96 static cl::opt<bool> PHICSEDebugHash(
97     "phicse-debug-hash",
98 #ifdef EXPENSIVE_CHECKS
99     cl::init(true),
100 #else
101     cl::init(false),
102 #endif
103     cl::Hidden,
104     cl::desc("Perform extra assertion checking to verify that PHINodes's hash "
105              "function is well-behaved w.r.t. its isEqual predicate"));
106 
107 static cl::opt<unsigned> PHICSENumPHISmallSize(
108     "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
109     cl::desc(
110         "When the basic block contains not more than this number of PHI nodes, "
111         "perform a (faster!) exhaustive search instead of set-driven one."));
112 
113 // Max recursion depth for collectBitParts used when detecting bswap and
114 // bitreverse idioms
115 static const unsigned BitPartRecursionMaxDepth = 64;
116 
117 //===----------------------------------------------------------------------===//
118 //  Local constant propagation.
119 //
120 
121 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
122 /// constant value, convert it into an unconditional branch to the constant
123 /// destination.  This is a nontrivial operation because the successors of this
124 /// basic block must have their PHI nodes updated.
125 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
126 /// conditions and indirectbr addresses this might make dead if
127 /// DeleteDeadConditions is true.
128 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
129                                   const TargetLibraryInfo *TLI,
130                                   DomTreeUpdater *DTU) {
131   Instruction *T = BB->getTerminator();
132   IRBuilder<> Builder(T);
133 
134   // Branch - See if we are conditional jumping on constant
135   if (auto *BI = dyn_cast<BranchInst>(T)) {
136     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
137 
138     BasicBlock *Dest1 = BI->getSuccessor(0);
139     BasicBlock *Dest2 = BI->getSuccessor(1);
140 
141     if (Dest2 == Dest1) {       // Conditional branch to same location?
142       // This branch matches something like this:
143       //     br bool %cond, label %Dest, label %Dest
144       // and changes it into:  br label %Dest
145 
146       // Let the basic block know that we are letting go of one copy of it.
147       assert(BI->getParent() && "Terminator not inserted in block!");
148       Dest1->removePredecessor(BI->getParent());
149 
150       // Replace the conditional branch with an unconditional one.
151       Builder.CreateBr(Dest1);
152       Value *Cond = BI->getCondition();
153       BI->eraseFromParent();
154       if (DeleteDeadConditions)
155         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
156       return true;
157     }
158 
159     if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
160       // Are we branching on constant?
161       // YES.  Change to unconditional branch...
162       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
163       BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
164 
165       // Let the basic block know that we are letting go of it.  Based on this,
166       // it will adjust it's PHI nodes.
167       OldDest->removePredecessor(BB);
168 
169       // Replace the conditional branch with an unconditional one.
170       Builder.CreateBr(Destination);
171       BI->eraseFromParent();
172       if (DTU)
173         DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
174       return true;
175     }
176 
177     return false;
178   }
179 
180   if (auto *SI = dyn_cast<SwitchInst>(T)) {
181     // If we are switching on a constant, we can convert the switch to an
182     // unconditional branch.
183     auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
184     BasicBlock *DefaultDest = SI->getDefaultDest();
185     BasicBlock *TheOnlyDest = DefaultDest;
186 
187     // If the default is unreachable, ignore it when searching for TheOnlyDest.
188     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
189         SI->getNumCases() > 0) {
190       TheOnlyDest = SI->case_begin()->getCaseSuccessor();
191     }
192 
193     bool Changed = false;
194 
195     // Figure out which case it goes to.
196     for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
197       // Found case matching a constant operand?
198       if (i->getCaseValue() == CI) {
199         TheOnlyDest = i->getCaseSuccessor();
200         break;
201       }
202 
203       // Check to see if this branch is going to the same place as the default
204       // dest.  If so, eliminate it as an explicit compare.
205       if (i->getCaseSuccessor() == DefaultDest) {
206         MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
207         unsigned NCases = SI->getNumCases();
208         // Fold the case metadata into the default if there will be any branches
209         // left, unless the metadata doesn't match the switch.
210         if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
211           // Collect branch weights into a vector.
212           SmallVector<uint32_t, 8> Weights;
213           for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
214                ++MD_i) {
215             auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
216             Weights.push_back(CI->getValue().getZExtValue());
217           }
218           // Merge weight of this case to the default weight.
219           unsigned idx = i->getCaseIndex();
220           Weights[0] += Weights[idx+1];
221           // Remove weight for this case.
222           std::swap(Weights[idx+1], Weights.back());
223           Weights.pop_back();
224           SI->setMetadata(LLVMContext::MD_prof,
225                           MDBuilder(BB->getContext()).
226                           createBranchWeights(Weights));
227         }
228         // Remove this entry.
229         BasicBlock *ParentBB = SI->getParent();
230         DefaultDest->removePredecessor(ParentBB);
231         i = SI->removeCase(i);
232         e = SI->case_end();
233         Changed = true;
234         continue;
235       }
236 
237       // Otherwise, check to see if the switch only branches to one destination.
238       // We do this by reseting "TheOnlyDest" to null when we find two non-equal
239       // destinations.
240       if (i->getCaseSuccessor() != TheOnlyDest)
241         TheOnlyDest = nullptr;
242 
243       // Increment this iterator as we haven't removed the case.
244       ++i;
245     }
246 
247     if (CI && !TheOnlyDest) {
248       // Branching on a constant, but not any of the cases, go to the default
249       // successor.
250       TheOnlyDest = SI->getDefaultDest();
251     }
252 
253     // If we found a single destination that we can fold the switch into, do so
254     // now.
255     if (TheOnlyDest) {
256       // Insert the new branch.
257       Builder.CreateBr(TheOnlyDest);
258       BasicBlock *BB = SI->getParent();
259 
260       SmallSet<BasicBlock *, 8> RemovedSuccessors;
261 
262       // Remove entries from PHI nodes which we no longer branch to...
263       BasicBlock *SuccToKeep = TheOnlyDest;
264       for (BasicBlock *Succ : successors(SI)) {
265         if (DTU && Succ != TheOnlyDest)
266           RemovedSuccessors.insert(Succ);
267         // Found case matching a constant operand?
268         if (Succ == SuccToKeep) {
269           SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
270         } else {
271           Succ->removePredecessor(BB);
272         }
273       }
274 
275       // Delete the old switch.
276       Value *Cond = SI->getCondition();
277       SI->eraseFromParent();
278       if (DeleteDeadConditions)
279         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
280       if (DTU) {
281         std::vector<DominatorTree::UpdateType> Updates;
282         Updates.reserve(RemovedSuccessors.size());
283         for (auto *RemovedSuccessor : RemovedSuccessors)
284           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
285         DTU->applyUpdates(Updates);
286       }
287       return true;
288     }
289 
290     if (SI->getNumCases() == 1) {
291       // Otherwise, we can fold this switch into a conditional branch
292       // instruction if it has only one non-default destination.
293       auto FirstCase = *SI->case_begin();
294       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
295           FirstCase.getCaseValue(), "cond");
296 
297       // Insert the new branch.
298       BranchInst *NewBr = Builder.CreateCondBr(Cond,
299                                                FirstCase.getCaseSuccessor(),
300                                                SI->getDefaultDest());
301       MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
302       if (MD && MD->getNumOperands() == 3) {
303         ConstantInt *SICase =
304             mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
305         ConstantInt *SIDef =
306             mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
307         assert(SICase && SIDef);
308         // The TrueWeight should be the weight for the single case of SI.
309         NewBr->setMetadata(LLVMContext::MD_prof,
310                         MDBuilder(BB->getContext()).
311                         createBranchWeights(SICase->getValue().getZExtValue(),
312                                             SIDef->getValue().getZExtValue()));
313       }
314 
315       // Update make.implicit metadata to the newly-created conditional branch.
316       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
317       if (MakeImplicitMD)
318         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
319 
320       // Delete the old switch.
321       SI->eraseFromParent();
322       return true;
323     }
324     return Changed;
325   }
326 
327   if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
328     // indirectbr blockaddress(@F, @BB) -> br label @BB
329     if (auto *BA =
330           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
331       BasicBlock *TheOnlyDest = BA->getBasicBlock();
332       SmallSet<BasicBlock *, 8> RemovedSuccessors;
333 
334       // Insert the new branch.
335       Builder.CreateBr(TheOnlyDest);
336 
337       BasicBlock *SuccToKeep = TheOnlyDest;
338       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
339         BasicBlock *DestBB = IBI->getDestination(i);
340         if (DTU && DestBB != TheOnlyDest)
341           RemovedSuccessors.insert(DestBB);
342         if (IBI->getDestination(i) == SuccToKeep) {
343           SuccToKeep = nullptr;
344         } else {
345           DestBB->removePredecessor(BB);
346         }
347       }
348       Value *Address = IBI->getAddress();
349       IBI->eraseFromParent();
350       if (DeleteDeadConditions)
351         // Delete pointer cast instructions.
352         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
353 
354       // Also zap the blockaddress constant if there are no users remaining,
355       // otherwise the destination is still marked as having its address taken.
356       if (BA->use_empty())
357         BA->destroyConstant();
358 
359       // If we didn't find our destination in the IBI successor list, then we
360       // have undefined behavior.  Replace the unconditional branch with an
361       // 'unreachable' instruction.
362       if (SuccToKeep) {
363         BB->getTerminator()->eraseFromParent();
364         new UnreachableInst(BB->getContext(), BB);
365       }
366 
367       if (DTU) {
368         std::vector<DominatorTree::UpdateType> Updates;
369         Updates.reserve(RemovedSuccessors.size());
370         for (auto *RemovedSuccessor : RemovedSuccessors)
371           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
372         DTU->applyUpdates(Updates);
373       }
374       return true;
375     }
376   }
377 
378   return false;
379 }
380 
381 //===----------------------------------------------------------------------===//
382 //  Local dead code elimination.
383 //
384 
385 /// isInstructionTriviallyDead - Return true if the result produced by the
386 /// instruction is not used, and the instruction has no side effects.
387 ///
388 bool llvm::isInstructionTriviallyDead(Instruction *I,
389                                       const TargetLibraryInfo *TLI) {
390   if (!I->use_empty())
391     return false;
392   return wouldInstructionBeTriviallyDead(I, TLI);
393 }
394 
395 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
396                                            const TargetLibraryInfo *TLI) {
397   if (I->isTerminator())
398     return false;
399 
400   // We don't want the landingpad-like instructions removed by anything this
401   // general.
402   if (I->isEHPad())
403     return false;
404 
405   // We don't want debug info removed by anything this general, unless
406   // debug info is empty.
407   if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
408     if (DDI->getAddress())
409       return false;
410     return true;
411   }
412   if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
413     if (DVI->hasArgList() || DVI->getValue(0))
414       return false;
415     return true;
416   }
417   if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
418     if (DLI->getLabel())
419       return false;
420     return true;
421   }
422 
423   if (!I->willReturn())
424     return false;
425 
426   if (!I->mayHaveSideEffects())
427     return true;
428 
429   // Special case intrinsics that "may have side effects" but can be deleted
430   // when dead.
431   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
432     // Safe to delete llvm.stacksave and launder.invariant.group if dead.
433     if (II->getIntrinsicID() == Intrinsic::stacksave ||
434         II->getIntrinsicID() == Intrinsic::launder_invariant_group)
435       return true;
436 
437     if (II->isLifetimeStartOrEnd()) {
438       auto *Arg = II->getArgOperand(1);
439       // Lifetime intrinsics are dead when their right-hand is undef.
440       if (isa<UndefValue>(Arg))
441         return true;
442       // If the right-hand is an alloc, global, or argument and the only uses
443       // are lifetime intrinsics then the intrinsics are dead.
444       if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
445         return llvm::all_of(Arg->uses(), [](Use &Use) {
446           if (IntrinsicInst *IntrinsicUse =
447                   dyn_cast<IntrinsicInst>(Use.getUser()))
448             return IntrinsicUse->isLifetimeStartOrEnd();
449           return false;
450         });
451       return false;
452     }
453 
454     // Assumptions are dead if their condition is trivially true.  Guards on
455     // true are operationally no-ops.  In the future we can consider more
456     // sophisticated tradeoffs for guards considering potential for check
457     // widening, but for now we keep things simple.
458     if ((II->getIntrinsicID() == Intrinsic::assume &&
459          isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) ||
460         II->getIntrinsicID() == Intrinsic::experimental_guard) {
461       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
462         return !Cond->isZero();
463 
464       return false;
465     }
466   }
467 
468   if (isAllocLikeFn(I, TLI))
469     return true;
470 
471   if (CallInst *CI = isFreeCall(I, TLI))
472     if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
473       return C->isNullValue() || isa<UndefValue>(C);
474 
475   if (auto *Call = dyn_cast<CallBase>(I))
476     if (isMathLibCallNoop(Call, TLI))
477       return true;
478 
479   return false;
480 }
481 
482 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
483 /// trivially dead instruction, delete it.  If that makes any of its operands
484 /// trivially dead, delete them too, recursively.  Return true if any
485 /// instructions were deleted.
486 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
487     Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
488     std::function<void(Value *)> AboutToDeleteCallback) {
489   Instruction *I = dyn_cast<Instruction>(V);
490   if (!I || !isInstructionTriviallyDead(I, TLI))
491     return false;
492 
493   SmallVector<WeakTrackingVH, 16> DeadInsts;
494   DeadInsts.push_back(I);
495   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
496                                              AboutToDeleteCallback);
497 
498   return true;
499 }
500 
501 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
502     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
503     MemorySSAUpdater *MSSAU,
504     std::function<void(Value *)> AboutToDeleteCallback) {
505   unsigned S = 0, E = DeadInsts.size(), Alive = 0;
506   for (; S != E; ++S) {
507     auto *I = cast<Instruction>(DeadInsts[S]);
508     if (!isInstructionTriviallyDead(I)) {
509       DeadInsts[S] = nullptr;
510       ++Alive;
511     }
512   }
513   if (Alive == E)
514     return false;
515   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
516                                              AboutToDeleteCallback);
517   return true;
518 }
519 
520 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
521     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
522     MemorySSAUpdater *MSSAU,
523     std::function<void(Value *)> AboutToDeleteCallback) {
524   // Process the dead instruction list until empty.
525   while (!DeadInsts.empty()) {
526     Value *V = DeadInsts.pop_back_val();
527     Instruction *I = cast_or_null<Instruction>(V);
528     if (!I)
529       continue;
530     assert(isInstructionTriviallyDead(I, TLI) &&
531            "Live instruction found in dead worklist!");
532     assert(I->use_empty() && "Instructions with uses are not dead.");
533 
534     // Don't lose the debug info while deleting the instructions.
535     salvageDebugInfo(*I);
536 
537     if (AboutToDeleteCallback)
538       AboutToDeleteCallback(I);
539 
540     // Null out all of the instruction's operands to see if any operand becomes
541     // dead as we go.
542     for (Use &OpU : I->operands()) {
543       Value *OpV = OpU.get();
544       OpU.set(nullptr);
545 
546       if (!OpV->use_empty())
547         continue;
548 
549       // If the operand is an instruction that became dead as we nulled out the
550       // operand, and if it is 'trivially' dead, delete it in a future loop
551       // iteration.
552       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
553         if (isInstructionTriviallyDead(OpI, TLI))
554           DeadInsts.push_back(OpI);
555     }
556     if (MSSAU)
557       MSSAU->removeMemoryAccess(I);
558 
559     I->eraseFromParent();
560   }
561 }
562 
563 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
564   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
565   findDbgUsers(DbgUsers, I);
566   for (auto *DII : DbgUsers) {
567     Value *Undef = UndefValue::get(I->getType());
568     DII->replaceVariableLocationOp(I, Undef);
569   }
570   return !DbgUsers.empty();
571 }
572 
573 /// areAllUsesEqual - Check whether the uses of a value are all the same.
574 /// This is similar to Instruction::hasOneUse() except this will also return
575 /// true when there are no uses or multiple uses that all refer to the same
576 /// value.
577 static bool areAllUsesEqual(Instruction *I) {
578   Value::user_iterator UI = I->user_begin();
579   Value::user_iterator UE = I->user_end();
580   if (UI == UE)
581     return true;
582 
583   User *TheUse = *UI;
584   for (++UI; UI != UE; ++UI) {
585     if (*UI != TheUse)
586       return false;
587   }
588   return true;
589 }
590 
591 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
592 /// dead PHI node, due to being a def-use chain of single-use nodes that
593 /// either forms a cycle or is terminated by a trivially dead instruction,
594 /// delete it.  If that makes any of its operands trivially dead, delete them
595 /// too, recursively.  Return true if a change was made.
596 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
597                                         const TargetLibraryInfo *TLI,
598                                         llvm::MemorySSAUpdater *MSSAU) {
599   SmallPtrSet<Instruction*, 4> Visited;
600   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
601        I = cast<Instruction>(*I->user_begin())) {
602     if (I->use_empty())
603       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
604 
605     // If we find an instruction more than once, we're on a cycle that
606     // won't prove fruitful.
607     if (!Visited.insert(I).second) {
608       // Break the cycle and delete the instruction and its operands.
609       I->replaceAllUsesWith(UndefValue::get(I->getType()));
610       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
611       return true;
612     }
613   }
614   return false;
615 }
616 
617 static bool
618 simplifyAndDCEInstruction(Instruction *I,
619                           SmallSetVector<Instruction *, 16> &WorkList,
620                           const DataLayout &DL,
621                           const TargetLibraryInfo *TLI) {
622   if (isInstructionTriviallyDead(I, TLI)) {
623     salvageDebugInfo(*I);
624 
625     // Null out all of the instruction's operands to see if any operand becomes
626     // dead as we go.
627     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
628       Value *OpV = I->getOperand(i);
629       I->setOperand(i, nullptr);
630 
631       if (!OpV->use_empty() || I == OpV)
632         continue;
633 
634       // If the operand is an instruction that became dead as we nulled out the
635       // operand, and if it is 'trivially' dead, delete it in a future loop
636       // iteration.
637       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
638         if (isInstructionTriviallyDead(OpI, TLI))
639           WorkList.insert(OpI);
640     }
641 
642     I->eraseFromParent();
643 
644     return true;
645   }
646 
647   if (Value *SimpleV = SimplifyInstruction(I, DL)) {
648     // Add the users to the worklist. CAREFUL: an instruction can use itself,
649     // in the case of a phi node.
650     for (User *U : I->users()) {
651       if (U != I) {
652         WorkList.insert(cast<Instruction>(U));
653       }
654     }
655 
656     // Replace the instruction with its simplified value.
657     bool Changed = false;
658     if (!I->use_empty()) {
659       I->replaceAllUsesWith(SimpleV);
660       Changed = true;
661     }
662     if (isInstructionTriviallyDead(I, TLI)) {
663       I->eraseFromParent();
664       Changed = true;
665     }
666     return Changed;
667   }
668   return false;
669 }
670 
671 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
672 /// simplify any instructions in it and recursively delete dead instructions.
673 ///
674 /// This returns true if it changed the code, note that it can delete
675 /// instructions in other blocks as well in this block.
676 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
677                                        const TargetLibraryInfo *TLI) {
678   bool MadeChange = false;
679   const DataLayout &DL = BB->getModule()->getDataLayout();
680 
681 #ifndef NDEBUG
682   // In debug builds, ensure that the terminator of the block is never replaced
683   // or deleted by these simplifications. The idea of simplification is that it
684   // cannot introduce new instructions, and there is no way to replace the
685   // terminator of a block without introducing a new instruction.
686   AssertingVH<Instruction> TerminatorVH(&BB->back());
687 #endif
688 
689   SmallSetVector<Instruction *, 16> WorkList;
690   // Iterate over the original function, only adding insts to the worklist
691   // if they actually need to be revisited. This avoids having to pre-init
692   // the worklist with the entire function's worth of instructions.
693   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
694        BI != E;) {
695     assert(!BI->isTerminator());
696     Instruction *I = &*BI;
697     ++BI;
698 
699     // We're visiting this instruction now, so make sure it's not in the
700     // worklist from an earlier visit.
701     if (!WorkList.count(I))
702       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
703   }
704 
705   while (!WorkList.empty()) {
706     Instruction *I = WorkList.pop_back_val();
707     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
708   }
709   return MadeChange;
710 }
711 
712 //===----------------------------------------------------------------------===//
713 //  Control Flow Graph Restructuring.
714 //
715 
716 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
717                                        DomTreeUpdater *DTU) {
718 
719   // If BB has single-entry PHI nodes, fold them.
720   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
721     Value *NewVal = PN->getIncomingValue(0);
722     // Replace self referencing PHI with undef, it must be dead.
723     if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
724     PN->replaceAllUsesWith(NewVal);
725     PN->eraseFromParent();
726   }
727 
728   BasicBlock *PredBB = DestBB->getSinglePredecessor();
729   assert(PredBB && "Block doesn't have a single predecessor!");
730 
731   bool ReplaceEntryBB = false;
732   if (PredBB == &DestBB->getParent()->getEntryBlock())
733     ReplaceEntryBB = true;
734 
735   // DTU updates: Collect all the edges that enter
736   // PredBB. These dominator edges will be redirected to DestBB.
737   SmallVector<DominatorTree::UpdateType, 32> Updates;
738 
739   if (DTU) {
740     SmallPtrSet<BasicBlock *, 2> PredsOfPredBB(pred_begin(PredBB),
741                                                pred_end(PredBB));
742     Updates.reserve(Updates.size() + 2 * PredsOfPredBB.size() + 1);
743     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
744       // This predecessor of PredBB may already have DestBB as a successor.
745       if (PredOfPredBB != PredBB)
746         Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
747     for (BasicBlock *PredOfPredBB : PredsOfPredBB)
748       Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
749     Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
750   }
751 
752   // Zap anything that took the address of DestBB.  Not doing this will give the
753   // address an invalid value.
754   if (DestBB->hasAddressTaken()) {
755     BlockAddress *BA = BlockAddress::get(DestBB);
756     Constant *Replacement =
757       ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
758     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
759                                                      BA->getType()));
760     BA->destroyConstant();
761   }
762 
763   // Anything that branched to PredBB now branches to DestBB.
764   PredBB->replaceAllUsesWith(DestBB);
765 
766   // Splice all the instructions from PredBB to DestBB.
767   PredBB->getTerminator()->eraseFromParent();
768   DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
769   new UnreachableInst(PredBB->getContext(), PredBB);
770 
771   // If the PredBB is the entry block of the function, move DestBB up to
772   // become the entry block after we erase PredBB.
773   if (ReplaceEntryBB)
774     DestBB->moveAfter(PredBB);
775 
776   if (DTU) {
777     assert(PredBB->getInstList().size() == 1 &&
778            isa<UnreachableInst>(PredBB->getTerminator()) &&
779            "The successor list of PredBB isn't empty before "
780            "applying corresponding DTU updates.");
781     DTU->applyUpdatesPermissive(Updates);
782     DTU->deleteBB(PredBB);
783     // Recalculation of DomTree is needed when updating a forward DomTree and
784     // the Entry BB is replaced.
785     if (ReplaceEntryBB && DTU->hasDomTree()) {
786       // The entry block was removed and there is no external interface for
787       // the dominator tree to be notified of this change. In this corner-case
788       // we recalculate the entire tree.
789       DTU->recalculate(*(DestBB->getParent()));
790     }
791   }
792 
793   else {
794     PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
795   }
796 }
797 
798 /// Return true if we can choose one of these values to use in place of the
799 /// other. Note that we will always choose the non-undef value to keep.
800 static bool CanMergeValues(Value *First, Value *Second) {
801   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
802 }
803 
804 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
805 /// branch to Succ, into Succ.
806 ///
807 /// Assumption: Succ is the single successor for BB.
808 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
809   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
810 
811   LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
812                     << Succ->getName() << "\n");
813   // Shortcut, if there is only a single predecessor it must be BB and merging
814   // is always safe
815   if (Succ->getSinglePredecessor()) return true;
816 
817   // Make a list of the predecessors of BB
818   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
819 
820   // Look at all the phi nodes in Succ, to see if they present a conflict when
821   // merging these blocks
822   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
823     PHINode *PN = cast<PHINode>(I);
824 
825     // If the incoming value from BB is again a PHINode in
826     // BB which has the same incoming value for *PI as PN does, we can
827     // merge the phi nodes and then the blocks can still be merged
828     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
829     if (BBPN && BBPN->getParent() == BB) {
830       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
831         BasicBlock *IBB = PN->getIncomingBlock(PI);
832         if (BBPreds.count(IBB) &&
833             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
834                             PN->getIncomingValue(PI))) {
835           LLVM_DEBUG(dbgs()
836                      << "Can't fold, phi node " << PN->getName() << " in "
837                      << Succ->getName() << " is conflicting with "
838                      << BBPN->getName() << " with regard to common predecessor "
839                      << IBB->getName() << "\n");
840           return false;
841         }
842       }
843     } else {
844       Value* Val = PN->getIncomingValueForBlock(BB);
845       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
846         // See if the incoming value for the common predecessor is equal to the
847         // one for BB, in which case this phi node will not prevent the merging
848         // of the block.
849         BasicBlock *IBB = PN->getIncomingBlock(PI);
850         if (BBPreds.count(IBB) &&
851             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
852           LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
853                             << " in " << Succ->getName()
854                             << " is conflicting with regard to common "
855                             << "predecessor " << IBB->getName() << "\n");
856           return false;
857         }
858       }
859     }
860   }
861 
862   return true;
863 }
864 
865 using PredBlockVector = SmallVector<BasicBlock *, 16>;
866 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
867 
868 /// Determines the value to use as the phi node input for a block.
869 ///
870 /// Select between \p OldVal any value that we know flows from \p BB
871 /// to a particular phi on the basis of which one (if either) is not
872 /// undef. Update IncomingValues based on the selected value.
873 ///
874 /// \param OldVal The value we are considering selecting.
875 /// \param BB The block that the value flows in from.
876 /// \param IncomingValues A map from block-to-value for other phi inputs
877 /// that we have examined.
878 ///
879 /// \returns the selected value.
880 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
881                                           IncomingValueMap &IncomingValues) {
882   if (!isa<UndefValue>(OldVal)) {
883     assert((!IncomingValues.count(BB) ||
884             IncomingValues.find(BB)->second == OldVal) &&
885            "Expected OldVal to match incoming value from BB!");
886 
887     IncomingValues.insert(std::make_pair(BB, OldVal));
888     return OldVal;
889   }
890 
891   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
892   if (It != IncomingValues.end()) return It->second;
893 
894   return OldVal;
895 }
896 
897 /// Create a map from block to value for the operands of a
898 /// given phi.
899 ///
900 /// Create a map from block to value for each non-undef value flowing
901 /// into \p PN.
902 ///
903 /// \param PN The phi we are collecting the map for.
904 /// \param IncomingValues [out] The map from block to value for this phi.
905 static void gatherIncomingValuesToPhi(PHINode *PN,
906                                       IncomingValueMap &IncomingValues) {
907   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
908     BasicBlock *BB = PN->getIncomingBlock(i);
909     Value *V = PN->getIncomingValue(i);
910 
911     if (!isa<UndefValue>(V))
912       IncomingValues.insert(std::make_pair(BB, V));
913   }
914 }
915 
916 /// Replace the incoming undef values to a phi with the values
917 /// from a block-to-value map.
918 ///
919 /// \param PN The phi we are replacing the undefs in.
920 /// \param IncomingValues A map from block to value.
921 static void replaceUndefValuesInPhi(PHINode *PN,
922                                     const IncomingValueMap &IncomingValues) {
923   SmallVector<unsigned> TrueUndefOps;
924   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
925     Value *V = PN->getIncomingValue(i);
926 
927     if (!isa<UndefValue>(V)) continue;
928 
929     BasicBlock *BB = PN->getIncomingBlock(i);
930     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
931 
932     // Keep track of undef/poison incoming values. Those must match, so we fix
933     // them up below if needed.
934     // Note: this is conservatively correct, but we could try harder and group
935     // the undef values per incoming basic block.
936     if (It == IncomingValues.end()) {
937       TrueUndefOps.push_back(i);
938       continue;
939     }
940 
941     // There is a defined value for this incoming block, so map this undef
942     // incoming value to the defined value.
943     PN->setIncomingValue(i, It->second);
944   }
945 
946   // If there are both undef and poison values incoming, then convert those
947   // values to undef. It is invalid to have different values for the same
948   // incoming block.
949   unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
950     return isa<PoisonValue>(PN->getIncomingValue(i));
951   });
952   if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
953     for (unsigned i : TrueUndefOps)
954       PN->setIncomingValue(i, UndefValue::get(PN->getType()));
955   }
956 }
957 
958 /// Replace a value flowing from a block to a phi with
959 /// potentially multiple instances of that value flowing from the
960 /// block's predecessors to the phi.
961 ///
962 /// \param BB The block with the value flowing into the phi.
963 /// \param BBPreds The predecessors of BB.
964 /// \param PN The phi that we are updating.
965 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
966                                                 const PredBlockVector &BBPreds,
967                                                 PHINode *PN) {
968   Value *OldVal = PN->removeIncomingValue(BB, false);
969   assert(OldVal && "No entry in PHI for Pred BB!");
970 
971   IncomingValueMap IncomingValues;
972 
973   // We are merging two blocks - BB, and the block containing PN - and
974   // as a result we need to redirect edges from the predecessors of BB
975   // to go to the block containing PN, and update PN
976   // accordingly. Since we allow merging blocks in the case where the
977   // predecessor and successor blocks both share some predecessors,
978   // and where some of those common predecessors might have undef
979   // values flowing into PN, we want to rewrite those values to be
980   // consistent with the non-undef values.
981 
982   gatherIncomingValuesToPhi(PN, IncomingValues);
983 
984   // If this incoming value is one of the PHI nodes in BB, the new entries
985   // in the PHI node are the entries from the old PHI.
986   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
987     PHINode *OldValPN = cast<PHINode>(OldVal);
988     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
989       // Note that, since we are merging phi nodes and BB and Succ might
990       // have common predecessors, we could end up with a phi node with
991       // identical incoming branches. This will be cleaned up later (and
992       // will trigger asserts if we try to clean it up now, without also
993       // simplifying the corresponding conditional branch).
994       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
995       Value *PredVal = OldValPN->getIncomingValue(i);
996       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
997                                                     IncomingValues);
998 
999       // And add a new incoming value for this predecessor for the
1000       // newly retargeted branch.
1001       PN->addIncoming(Selected, PredBB);
1002     }
1003   } else {
1004     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1005       // Update existing incoming values in PN for this
1006       // predecessor of BB.
1007       BasicBlock *PredBB = BBPreds[i];
1008       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
1009                                                     IncomingValues);
1010 
1011       // And add a new incoming value for this predecessor for the
1012       // newly retargeted branch.
1013       PN->addIncoming(Selected, PredBB);
1014     }
1015   }
1016 
1017   replaceUndefValuesInPhi(PN, IncomingValues);
1018 }
1019 
1020 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1021                                                    DomTreeUpdater *DTU) {
1022   assert(BB != &BB->getParent()->getEntryBlock() &&
1023          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1024 
1025   // We can't eliminate infinite loops.
1026   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1027   if (BB == Succ) return false;
1028 
1029   // Check to see if merging these blocks would cause conflicts for any of the
1030   // phi nodes in BB or Succ. If not, we can safely merge.
1031   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
1032 
1033   // Check for cases where Succ has multiple predecessors and a PHI node in BB
1034   // has uses which will not disappear when the PHI nodes are merged.  It is
1035   // possible to handle such cases, but difficult: it requires checking whether
1036   // BB dominates Succ, which is non-trivial to calculate in the case where
1037   // Succ has multiple predecessors.  Also, it requires checking whether
1038   // constructing the necessary self-referential PHI node doesn't introduce any
1039   // conflicts; this isn't too difficult, but the previous code for doing this
1040   // was incorrect.
1041   //
1042   // Note that if this check finds a live use, BB dominates Succ, so BB is
1043   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1044   // folding the branch isn't profitable in that case anyway.
1045   if (!Succ->getSinglePredecessor()) {
1046     BasicBlock::iterator BBI = BB->begin();
1047     while (isa<PHINode>(*BBI)) {
1048       for (Use &U : BBI->uses()) {
1049         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1050           if (PN->getIncomingBlock(U) != BB)
1051             return false;
1052         } else {
1053           return false;
1054         }
1055       }
1056       ++BBI;
1057     }
1058   }
1059 
1060   // We cannot fold the block if it's a branch to an already present callbr
1061   // successor because that creates duplicate successors.
1062   for (BasicBlock *PredBB : predecessors(BB)) {
1063     if (auto *CBI = dyn_cast<CallBrInst>(PredBB->getTerminator())) {
1064       if (Succ == CBI->getDefaultDest())
1065         return false;
1066       for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
1067         if (Succ == CBI->getIndirectDest(i))
1068           return false;
1069     }
1070   }
1071 
1072   LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1073 
1074   SmallVector<DominatorTree::UpdateType, 32> Updates;
1075   if (DTU) {
1076     // All predecessors of BB will be moved to Succ.
1077     SmallPtrSet<BasicBlock *, 8> PredsOfBB(pred_begin(BB), pred_end(BB));
1078     SmallPtrSet<BasicBlock *, 8> PredsOfSucc(pred_begin(Succ), pred_end(Succ));
1079     Updates.reserve(Updates.size() + 2 * PredsOfBB.size() + 1);
1080     for (auto *PredOfBB : PredsOfBB)
1081       // This predecessor of BB may already have Succ as a successor.
1082       if (!PredsOfSucc.contains(PredOfBB))
1083         Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1084     for (auto *PredOfBB : PredsOfBB)
1085       Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1086     Updates.push_back({DominatorTree::Delete, BB, Succ});
1087   }
1088 
1089   if (isa<PHINode>(Succ->begin())) {
1090     // If there is more than one pred of succ, and there are PHI nodes in
1091     // the successor, then we need to add incoming edges for the PHI nodes
1092     //
1093     const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
1094 
1095     // Loop over all of the PHI nodes in the successor of BB.
1096     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1097       PHINode *PN = cast<PHINode>(I);
1098 
1099       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1100     }
1101   }
1102 
1103   if (Succ->getSinglePredecessor()) {
1104     // BB is the only predecessor of Succ, so Succ will end up with exactly
1105     // the same predecessors BB had.
1106 
1107     // Copy over any phi, debug or lifetime instruction.
1108     BB->getTerminator()->eraseFromParent();
1109     Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
1110                                BB->getInstList());
1111   } else {
1112     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1113       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1114       assert(PN->use_empty() && "There shouldn't be any uses here!");
1115       PN->eraseFromParent();
1116     }
1117   }
1118 
1119   // If the unconditional branch we replaced contains llvm.loop metadata, we
1120   // add the metadata to the branch instructions in the predecessors.
1121   unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
1122   Instruction *TI = BB->getTerminator();
1123   if (TI)
1124     if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
1125       for (BasicBlock *Pred : predecessors(BB))
1126         Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
1127 
1128   // For AutoFDO, since BB is going to be removed, we won't be able to sample
1129   // it. To avoid assigning a zero weight for BB, move all its pseudo probes
1130   // into Succ and mark them dangling. This should allow the counts inference a
1131   // chance to get a more reasonable weight for BB.
1132   moveAndDanglePseudoProbes(BB, &*Succ->getFirstInsertionPt());
1133 
1134   // Everything that jumped to BB now goes to Succ.
1135   BB->replaceAllUsesWith(Succ);
1136   if (!Succ->hasName()) Succ->takeName(BB);
1137 
1138   // Clear the successor list of BB to match updates applying to DTU later.
1139   if (BB->getTerminator())
1140     BB->getInstList().pop_back();
1141   new UnreachableInst(BB->getContext(), BB);
1142   assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1143                            "applying corresponding DTU updates.");
1144 
1145   if (DTU) {
1146     DTU->applyUpdates(Updates);
1147     DTU->deleteBB(BB);
1148   } else {
1149     BB->eraseFromParent(); // Delete the old basic block.
1150   }
1151   return true;
1152 }
1153 
1154 static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) {
1155   // This implementation doesn't currently consider undef operands
1156   // specially. Theoretically, two phis which are identical except for
1157   // one having an undef where the other doesn't could be collapsed.
1158 
1159   bool Changed = false;
1160 
1161   // Examine each PHI.
1162   // Note that increment of I must *NOT* be in the iteration_expression, since
1163   // we don't want to immediately advance when we restart from the beginning.
1164   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1165     ++I;
1166     // Is there an identical PHI node in this basic block?
1167     // Note that we only look in the upper square's triangle,
1168     // we already checked that the lower triangle PHI's aren't identical.
1169     for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1170       if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1171         continue;
1172       // A duplicate. Replace this PHI with the base PHI.
1173       ++NumPHICSEs;
1174       DuplicatePN->replaceAllUsesWith(PN);
1175       DuplicatePN->eraseFromParent();
1176       Changed = true;
1177 
1178       // The RAUW can change PHIs that we already visited.
1179       I = BB->begin();
1180       break; // Start over from the beginning.
1181     }
1182   }
1183   return Changed;
1184 }
1185 
1186 static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) {
1187   // This implementation doesn't currently consider undef operands
1188   // specially. Theoretically, two phis which are identical except for
1189   // one having an undef where the other doesn't could be collapsed.
1190 
1191   struct PHIDenseMapInfo {
1192     static PHINode *getEmptyKey() {
1193       return DenseMapInfo<PHINode *>::getEmptyKey();
1194     }
1195 
1196     static PHINode *getTombstoneKey() {
1197       return DenseMapInfo<PHINode *>::getTombstoneKey();
1198     }
1199 
1200     static bool isSentinel(PHINode *PN) {
1201       return PN == getEmptyKey() || PN == getTombstoneKey();
1202     }
1203 
1204     // WARNING: this logic must be kept in sync with
1205     //          Instruction::isIdenticalToWhenDefined()!
1206     static unsigned getHashValueImpl(PHINode *PN) {
1207       // Compute a hash value on the operands. Instcombine will likely have
1208       // sorted them, which helps expose duplicates, but we have to check all
1209       // the operands to be safe in case instcombine hasn't run.
1210       return static_cast<unsigned>(hash_combine(
1211           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1212           hash_combine_range(PN->block_begin(), PN->block_end())));
1213     }
1214 
1215     static unsigned getHashValue(PHINode *PN) {
1216 #ifndef NDEBUG
1217       // If -phicse-debug-hash was specified, return a constant -- this
1218       // will force all hashing to collide, so we'll exhaustively search
1219       // the table for a match, and the assertion in isEqual will fire if
1220       // there's a bug causing equal keys to hash differently.
1221       if (PHICSEDebugHash)
1222         return 0;
1223 #endif
1224       return getHashValueImpl(PN);
1225     }
1226 
1227     static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1228       if (isSentinel(LHS) || isSentinel(RHS))
1229         return LHS == RHS;
1230       return LHS->isIdenticalTo(RHS);
1231     }
1232 
1233     static bool isEqual(PHINode *LHS, PHINode *RHS) {
1234       // These comparisons are nontrivial, so assert that equality implies
1235       // hash equality (DenseMap demands this as an invariant).
1236       bool Result = isEqualImpl(LHS, RHS);
1237       assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1238              getHashValueImpl(LHS) == getHashValueImpl(RHS));
1239       return Result;
1240     }
1241   };
1242 
1243   // Set of unique PHINodes.
1244   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1245   PHISet.reserve(4 * PHICSENumPHISmallSize);
1246 
1247   // Examine each PHI.
1248   bool Changed = false;
1249   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1250     auto Inserted = PHISet.insert(PN);
1251     if (!Inserted.second) {
1252       // A duplicate. Replace this PHI with its duplicate.
1253       ++NumPHICSEs;
1254       PN->replaceAllUsesWith(*Inserted.first);
1255       PN->eraseFromParent();
1256       Changed = true;
1257 
1258       // The RAUW can change PHIs that we already visited. Start over from the
1259       // beginning.
1260       PHISet.clear();
1261       I = BB->begin();
1262     }
1263   }
1264 
1265   return Changed;
1266 }
1267 
1268 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1269   if (
1270 #ifndef NDEBUG
1271       !PHICSEDebugHash &&
1272 #endif
1273       hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1274     return EliminateDuplicatePHINodesNaiveImpl(BB);
1275   return EliminateDuplicatePHINodesSetBasedImpl(BB);
1276 }
1277 
1278 /// If the specified pointer points to an object that we control, try to modify
1279 /// the object's alignment to PrefAlign. Returns a minimum known alignment of
1280 /// the value after the operation, which may be lower than PrefAlign.
1281 ///
1282 /// Increating value alignment isn't often possible though. If alignment is
1283 /// important, a more reliable approach is to simply align all global variables
1284 /// and allocation instructions to their preferred alignment from the beginning.
1285 static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1286                                  const DataLayout &DL) {
1287   V = V->stripPointerCasts();
1288 
1289   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1290     // TODO: Ideally, this function would not be called if PrefAlign is smaller
1291     // than the current alignment, as the known bits calculation should have
1292     // already taken it into account. However, this is not always the case,
1293     // as computeKnownBits() has a depth limit, while stripPointerCasts()
1294     // doesn't.
1295     Align CurrentAlign = AI->getAlign();
1296     if (PrefAlign <= CurrentAlign)
1297       return CurrentAlign;
1298 
1299     // If the preferred alignment is greater than the natural stack alignment
1300     // then don't round up. This avoids dynamic stack realignment.
1301     if (DL.exceedsNaturalStackAlignment(PrefAlign))
1302       return CurrentAlign;
1303     AI->setAlignment(PrefAlign);
1304     return PrefAlign;
1305   }
1306 
1307   if (auto *GO = dyn_cast<GlobalObject>(V)) {
1308     // TODO: as above, this shouldn't be necessary.
1309     Align CurrentAlign = GO->getPointerAlignment(DL);
1310     if (PrefAlign <= CurrentAlign)
1311       return CurrentAlign;
1312 
1313     // If there is a large requested alignment and we can, bump up the alignment
1314     // of the global.  If the memory we set aside for the global may not be the
1315     // memory used by the final program then it is impossible for us to reliably
1316     // enforce the preferred alignment.
1317     if (!GO->canIncreaseAlignment())
1318       return CurrentAlign;
1319 
1320     GO->setAlignment(PrefAlign);
1321     return PrefAlign;
1322   }
1323 
1324   return Align(1);
1325 }
1326 
1327 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1328                                        const DataLayout &DL,
1329                                        const Instruction *CxtI,
1330                                        AssumptionCache *AC,
1331                                        const DominatorTree *DT) {
1332   assert(V->getType()->isPointerTy() &&
1333          "getOrEnforceKnownAlignment expects a pointer!");
1334 
1335   KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1336   unsigned TrailZ = Known.countMinTrailingZeros();
1337 
1338   // Avoid trouble with ridiculously large TrailZ values, such as
1339   // those computed from a null pointer.
1340   // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1341   TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1342 
1343   Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
1344 
1345   if (PrefAlign && *PrefAlign > Alignment)
1346     Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1347 
1348   // We don't need to make any adjustment.
1349   return Alignment;
1350 }
1351 
1352 ///===---------------------------------------------------------------------===//
1353 ///  Dbg Intrinsic utilities
1354 ///
1355 
1356 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1357 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1358                              DIExpression *DIExpr,
1359                              PHINode *APN) {
1360   // Since we can't guarantee that the original dbg.declare instrinsic
1361   // is removed by LowerDbgDeclare(), we need to make sure that we are
1362   // not inserting the same dbg.value intrinsic over and over.
1363   SmallVector<DbgValueInst *, 1> DbgValues;
1364   findDbgValues(DbgValues, APN);
1365   for (auto *DVI : DbgValues) {
1366     assert(is_contained(DVI->getValues(), APN));
1367     if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1368       return true;
1369   }
1370   return false;
1371 }
1372 
1373 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1374 /// (or fragment of the variable) described by \p DII.
1375 ///
1376 /// This is primarily intended as a helper for the different
1377 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
1378 /// converted describes an alloca'd variable, so we need to use the
1379 /// alloc size of the value when doing the comparison. E.g. an i1 value will be
1380 /// identified as covering an n-bit fragment, if the store size of i1 is at
1381 /// least n bits.
1382 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1383   const DataLayout &DL = DII->getModule()->getDataLayout();
1384   TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1385   if (Optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
1386     assert(!ValueSize.isScalable() &&
1387            "Fragments don't work on scalable types.");
1388     return ValueSize.getFixedSize() >= *FragmentSize;
1389   }
1390   // We can't always calculate the size of the DI variable (e.g. if it is a
1391   // VLA). Try to use the size of the alloca that the dbg intrinsic describes
1392   // intead.
1393   if (DII->isAddressOfVariable()) {
1394     // DII should have exactly 1 location when it is an address.
1395     assert(DII->getNumVariableLocationOps() == 1 &&
1396            "address of variable must have exactly 1 location operand.");
1397     if (auto *AI =
1398             dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1399       if (Optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
1400         assert(ValueSize.isScalable() == FragmentSize->isScalable() &&
1401                "Both sizes should agree on the scalable flag.");
1402         return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1403       }
1404     }
1405   }
1406   // Could not determine size of variable. Conservatively return false.
1407   return false;
1408 }
1409 
1410 /// Produce a DebugLoc to use for each dbg.declare/inst pair that are promoted
1411 /// to a dbg.value. Because no machine insts can come from debug intrinsics,
1412 /// only the scope and inlinedAt is significant. Zero line numbers are used in
1413 /// case this DebugLoc leaks into any adjacent instructions.
1414 static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
1415   // Original dbg.declare must have a location.
1416   DebugLoc DeclareLoc = DII->getDebugLoc();
1417   MDNode *Scope = DeclareLoc.getScope();
1418   DILocation *InlinedAt = DeclareLoc.getInlinedAt();
1419   // Produce an unknown location with the correct scope / inlinedAt fields.
1420   return DILocation::get(DII->getContext(), 0, 0, Scope, InlinedAt);
1421 }
1422 
1423 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1424 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1425 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1426                                            StoreInst *SI, DIBuilder &Builder) {
1427   assert(DII->isAddressOfVariable());
1428   auto *DIVar = DII->getVariable();
1429   assert(DIVar && "Missing variable");
1430   auto *DIExpr = DII->getExpression();
1431   Value *DV = SI->getValueOperand();
1432 
1433   DebugLoc NewLoc = getDebugValueLoc(DII, SI);
1434 
1435   if (!valueCoversEntireFragment(DV->getType(), DII)) {
1436     // FIXME: If storing to a part of the variable described by the dbg.declare,
1437     // then we want to insert a dbg.value for the corresponding fragment.
1438     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1439                       << *DII << '\n');
1440     // For now, when there is a store to parts of the variable (but we do not
1441     // know which part) we insert an dbg.value instrinsic to indicate that we
1442     // know nothing about the variable's content.
1443     DV = UndefValue::get(DV->getType());
1444     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1445     return;
1446   }
1447 
1448   Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1449 }
1450 
1451 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1452 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1453 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1454                                            LoadInst *LI, DIBuilder &Builder) {
1455   auto *DIVar = DII->getVariable();
1456   auto *DIExpr = DII->getExpression();
1457   assert(DIVar && "Missing variable");
1458 
1459   if (!valueCoversEntireFragment(LI->getType(), DII)) {
1460     // FIXME: If only referring to a part of the variable described by the
1461     // dbg.declare, then we want to insert a dbg.value for the corresponding
1462     // fragment.
1463     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1464                       << *DII << '\n');
1465     return;
1466   }
1467 
1468   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1469 
1470   // We are now tracking the loaded value instead of the address. In the
1471   // future if multi-location support is added to the IR, it might be
1472   // preferable to keep tracking both the loaded value and the original
1473   // address in case the alloca can not be elided.
1474   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1475       LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
1476   DbgValue->insertAfter(LI);
1477 }
1478 
1479 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1480 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1481 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1482                                            PHINode *APN, DIBuilder &Builder) {
1483   auto *DIVar = DII->getVariable();
1484   auto *DIExpr = DII->getExpression();
1485   assert(DIVar && "Missing variable");
1486 
1487   if (PhiHasDebugValue(DIVar, DIExpr, APN))
1488     return;
1489 
1490   if (!valueCoversEntireFragment(APN->getType(), DII)) {
1491     // FIXME: If only referring to a part of the variable described by the
1492     // dbg.declare, then we want to insert a dbg.value for the corresponding
1493     // fragment.
1494     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1495                       << *DII << '\n');
1496     return;
1497   }
1498 
1499   BasicBlock *BB = APN->getParent();
1500   auto InsertionPt = BB->getFirstInsertionPt();
1501 
1502   DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);
1503 
1504   // The block may be a catchswitch block, which does not have a valid
1505   // insertion point.
1506   // FIXME: Insert dbg.value markers in the successors when appropriate.
1507   if (InsertionPt != BB->end())
1508     Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
1509 }
1510 
1511 /// Determine whether this alloca is either a VLA or an array.
1512 static bool isArray(AllocaInst *AI) {
1513   return AI->isArrayAllocation() ||
1514          (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1515 }
1516 
1517 /// Determine whether this alloca is a structure.
1518 static bool isStructure(AllocaInst *AI) {
1519   return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1520 }
1521 
1522 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1523 /// of llvm.dbg.value intrinsics.
1524 bool llvm::LowerDbgDeclare(Function &F) {
1525   bool Changed = false;
1526   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1527   SmallVector<DbgDeclareInst *, 4> Dbgs;
1528   for (auto &FI : F)
1529     for (Instruction &BI : FI)
1530       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1531         Dbgs.push_back(DDI);
1532 
1533   if (Dbgs.empty())
1534     return Changed;
1535 
1536   for (auto &I : Dbgs) {
1537     DbgDeclareInst *DDI = I;
1538     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1539     // If this is an alloca for a scalar variable, insert a dbg.value
1540     // at each load and store to the alloca and erase the dbg.declare.
1541     // The dbg.values allow tracking a variable even if it is not
1542     // stored on the stack, while the dbg.declare can only describe
1543     // the stack slot (and at a lexical-scope granularity). Later
1544     // passes will attempt to elide the stack slot.
1545     if (!AI || isArray(AI) || isStructure(AI))
1546       continue;
1547 
1548     // A volatile load/store means that the alloca can't be elided anyway.
1549     if (llvm::any_of(AI->users(), [](User *U) -> bool {
1550           if (LoadInst *LI = dyn_cast<LoadInst>(U))
1551             return LI->isVolatile();
1552           if (StoreInst *SI = dyn_cast<StoreInst>(U))
1553             return SI->isVolatile();
1554           return false;
1555         }))
1556       continue;
1557 
1558     SmallVector<const Value *, 8> WorkList;
1559     WorkList.push_back(AI);
1560     while (!WorkList.empty()) {
1561       const Value *V = WorkList.pop_back_val();
1562       for (auto &AIUse : V->uses()) {
1563         User *U = AIUse.getUser();
1564         if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1565           if (AIUse.getOperandNo() == 1)
1566             ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1567         } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1568           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1569         } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1570           // This is a call by-value or some other instruction that takes a
1571           // pointer to the variable. Insert a *value* intrinsic that describes
1572           // the variable by dereferencing the alloca.
1573           if (!CI->isLifetimeStartOrEnd()) {
1574             DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr);
1575             auto *DerefExpr =
1576                 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1577             DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1578                                         NewLoc, CI);
1579           }
1580         } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1581           if (BI->getType()->isPointerTy())
1582             WorkList.push_back(BI);
1583         }
1584       }
1585     }
1586     DDI->eraseFromParent();
1587     Changed = true;
1588   }
1589 
1590   if (Changed)
1591   for (BasicBlock &BB : F)
1592     RemoveRedundantDbgInstrs(&BB);
1593 
1594   return Changed;
1595 }
1596 
1597 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
1598 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1599                                     SmallVectorImpl<PHINode *> &InsertedPHIs) {
1600   assert(BB && "No BasicBlock to clone dbg.value(s) from.");
1601   if (InsertedPHIs.size() == 0)
1602     return;
1603 
1604   // Map existing PHI nodes to their dbg.values.
1605   ValueToValueMapTy DbgValueMap;
1606   for (auto &I : *BB) {
1607     if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1608       for (Value *V : DbgII->location_ops())
1609         if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1610           DbgValueMap.insert({Loc, DbgII});
1611     }
1612   }
1613   if (DbgValueMap.size() == 0)
1614     return;
1615 
1616   // Map a pair of the destination BB and old dbg.value to the new dbg.value,
1617   // so that if a dbg.value is being rewritten to use more than one of the
1618   // inserted PHIs in the same destination BB, we can update the same dbg.value
1619   // with all the new PHIs instead of creating one copy for each.
1620   MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
1621             DbgVariableIntrinsic *>
1622       NewDbgValueMap;
1623   // Then iterate through the new PHIs and look to see if they use one of the
1624   // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
1625   // propagate the info through the new PHI. If we use more than one new PHI in
1626   // a single destination BB with the same old dbg.value, merge the updates so
1627   // that we get a single new dbg.value with all the new PHIs.
1628   for (auto PHI : InsertedPHIs) {
1629     BasicBlock *Parent = PHI->getParent();
1630     // Avoid inserting an intrinsic into an EH block.
1631     if (Parent->getFirstNonPHI()->isEHPad())
1632       continue;
1633     for (auto VI : PHI->operand_values()) {
1634       auto V = DbgValueMap.find(VI);
1635       if (V != DbgValueMap.end()) {
1636         auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1637         auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1638         if (NewDI == NewDbgValueMap.end()) {
1639           auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
1640           NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1641         }
1642         DbgVariableIntrinsic *NewDbgII = NewDI->second;
1643         // If PHI contains VI as an operand more than once, we may
1644         // replaced it in NewDbgII; confirm that it is present.
1645         if (is_contained(NewDbgII->location_ops(), VI))
1646           NewDbgII->replaceVariableLocationOp(VI, PHI);
1647       }
1648     }
1649   }
1650   // Insert thew new dbg.values into their destination blocks.
1651   for (auto DI : NewDbgValueMap) {
1652     BasicBlock *Parent = DI.first.first;
1653     auto *NewDbgII = DI.second;
1654     auto InsertionPt = Parent->getFirstInsertionPt();
1655     assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1656     NewDbgII->insertBefore(&*InsertionPt);
1657   }
1658 }
1659 
1660 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1661                              DIBuilder &Builder, uint8_t DIExprFlags,
1662                              int Offset) {
1663   auto DbgAddrs = FindDbgAddrUses(Address);
1664   for (DbgVariableIntrinsic *DII : DbgAddrs) {
1665     DebugLoc Loc = DII->getDebugLoc();
1666     auto *DIVar = DII->getVariable();
1667     auto *DIExpr = DII->getExpression();
1668     assert(DIVar && "Missing variable");
1669     DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1670     // Insert llvm.dbg.declare immediately before DII, and remove old
1671     // llvm.dbg.declare.
1672     Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
1673     DII->eraseFromParent();
1674   }
1675   return !DbgAddrs.empty();
1676 }
1677 
1678 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1679                                         DIBuilder &Builder, int Offset) {
1680   DebugLoc Loc = DVI->getDebugLoc();
1681   auto *DIVar = DVI->getVariable();
1682   auto *DIExpr = DVI->getExpression();
1683   assert(DIVar && "Missing variable");
1684 
1685   // This is an alloca-based llvm.dbg.value. The first thing it should do with
1686   // the alloca pointer is dereference it. Otherwise we don't know how to handle
1687   // it and give up.
1688   if (!DIExpr || DIExpr->getNumElements() < 1 ||
1689       DIExpr->getElement(0) != dwarf::DW_OP_deref)
1690     return;
1691 
1692   // Insert the offset before the first deref.
1693   // We could just change the offset argument of dbg.value, but it's unsigned...
1694   if (Offset)
1695     DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1696 
1697   Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1698   DVI->eraseFromParent();
1699 }
1700 
1701 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1702                                     DIBuilder &Builder, int Offset) {
1703   if (auto *L = LocalAsMetadata::getIfExists(AI))
1704     if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1705       for (Use &U : llvm::make_early_inc_range(MDV->uses()))
1706         if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1707           replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1708 }
1709 
1710 /// Where possible to salvage debug information for \p I do so
1711 /// and return True. If not possible mark undef and return False.
1712 void llvm::salvageDebugInfo(Instruction &I) {
1713   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1714   findDbgUsers(DbgUsers, &I);
1715   salvageDebugInfoForDbgValues(I, DbgUsers);
1716 }
1717 
1718 void llvm::salvageDebugInfoForDbgValues(
1719     Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
1720   bool Salvaged = false;
1721 
1722   for (auto *DII : DbgUsers) {
1723     // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they
1724     // are implicitly pointing out the value as a DWARF memory location
1725     // description.
1726     bool StackValue = isa<DbgValueInst>(DII);
1727     auto DIILocation = DII->location_ops();
1728     assert(
1729         is_contained(DIILocation, &I) &&
1730         "DbgVariableIntrinsic must use salvaged instruction as its location");
1731     unsigned LocNo = std::distance(DIILocation.begin(), find(DIILocation, &I));
1732 
1733     DIExpression *DIExpr =
1734         salvageDebugInfoImpl(I, DII->getExpression(), StackValue, LocNo);
1735 
1736     // salvageDebugInfoImpl should fail on examining the first element of
1737     // DbgUsers, or none of them.
1738     if (!DIExpr)
1739       break;
1740 
1741     DII->replaceVariableLocationOp(&I, I.getOperand(0));
1742     DII->setExpression(DIExpr);
1743     LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1744     Salvaged = true;
1745   }
1746 
1747   if (Salvaged)
1748     return;
1749 
1750   for (auto *DII : DbgUsers) {
1751     Value *Undef = UndefValue::get(I.getType());
1752     DII->replaceVariableLocationOp(&I, Undef);
1753   }
1754 }
1755 
1756 bool getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
1757                          SmallVectorImpl<uint64_t> &Opcodes) {
1758   unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
1759   // Rewrite a constant GEP into a DIExpression.
1760   APInt ConstantOffset(BitWidth, 0);
1761   if (!GEP->accumulateConstantOffset(DL, ConstantOffset))
1762     return false;
1763   DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
1764   return true;
1765 }
1766 
1767 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
1768   switch (Opcode) {
1769   case Instruction::Add:
1770     return dwarf::DW_OP_plus;
1771   case Instruction::Sub:
1772     return dwarf::DW_OP_minus;
1773   case Instruction::Mul:
1774     return dwarf::DW_OP_mul;
1775   case Instruction::SDiv:
1776     return dwarf::DW_OP_div;
1777   case Instruction::SRem:
1778     return dwarf::DW_OP_mod;
1779   case Instruction::Or:
1780     return dwarf::DW_OP_or;
1781   case Instruction::And:
1782     return dwarf::DW_OP_and;
1783   case Instruction::Xor:
1784     return dwarf::DW_OP_xor;
1785   case Instruction::Shl:
1786     return dwarf::DW_OP_shl;
1787   case Instruction::LShr:
1788     return dwarf::DW_OP_shr;
1789   case Instruction::AShr:
1790     return dwarf::DW_OP_shra;
1791   default:
1792     // TODO: Salvage from each kind of binop we know about.
1793     return 0;
1794   }
1795 }
1796 
1797 bool getSalvageOpsForBinOp(BinaryOperator *BI,
1798                            SmallVectorImpl<uint64_t> &Opcodes) {
1799   // Rewrite binary operations with constant integer operands.
1800   auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
1801   if (!ConstInt || ConstInt->getBitWidth() > 64)
1802     return false;
1803   uint64_t Val = ConstInt->getSExtValue();
1804   Instruction::BinaryOps BinOpcode = BI->getOpcode();
1805   // Add or Sub Instructions with a constant operand can potentially be
1806   // simplified.
1807   if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
1808     uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
1809     DIExpression::appendOffset(Opcodes, Offset);
1810     return true;
1811   }
1812   // Add constant int operand to expression stack.
1813   Opcodes.append({dwarf::DW_OP_constu, Val});
1814 
1815   // Add salvaged binary operator to expression stack, if it has a valid
1816   // representation in a DIExpression.
1817   uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
1818   if (!DwarfBinOp)
1819     return false;
1820   Opcodes.push_back(DwarfBinOp);
1821 
1822   return true;
1823 }
1824 
1825 DIExpression *llvm::salvageDebugInfoImpl(Instruction &I,
1826                                          DIExpression *SrcDIExpr,
1827                                          bool WithStackValue, unsigned LocNo) {
1828   auto &M = *I.getModule();
1829   auto &DL = M.getDataLayout();
1830 
1831   // Apply a vector of opcodes to the source DIExpression.
1832   auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * {
1833     DIExpression *DIExpr = SrcDIExpr;
1834     if (!Ops.empty()) {
1835       DIExpr = DIExpression::appendOpsToArg(DIExpr, Ops, LocNo, WithStackValue);
1836     }
1837     return DIExpr;
1838   };
1839 
1840   // initializer-list helper for applying operators to the source DIExpression.
1841   auto applyOps = [&](ArrayRef<uint64_t> Opcodes) -> DIExpression * {
1842     SmallVector<uint64_t, 8> Ops(Opcodes.begin(), Opcodes.end());
1843     return doSalvage(Ops);
1844   };
1845 
1846   if (auto *CI = dyn_cast<CastInst>(&I)) {
1847     // No-op casts are irrelevant for debug info.
1848     if (CI->isNoopCast(DL))
1849       return SrcDIExpr;
1850 
1851     Type *Type = CI->getType();
1852     // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged.
1853     if (Type->isVectorTy() ||
1854         !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I)))
1855       return nullptr;
1856 
1857     Value *FromValue = CI->getOperand(0);
1858     unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits();
1859     unsigned ToTypeBitSize = Type->getScalarSizeInBits();
1860 
1861     return applyOps(DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
1862                                             isa<SExtInst>(&I)));
1863   }
1864 
1865   SmallVector<uint64_t, 8> Ops;
1866   if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1867     if (getSalvageOpsForGEP(GEP, DL, Ops))
1868       return doSalvage(Ops);
1869   } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1870     if (getSalvageOpsForBinOp(BI, Ops))
1871       return doSalvage(Ops);
1872   }
1873     // *Not* to do: we should not attempt to salvage load instructions,
1874     // because the validity and lifetime of a dbg.value containing
1875     // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
1876   return nullptr;
1877 }
1878 
1879 /// A replacement for a dbg.value expression.
1880 using DbgValReplacement = Optional<DIExpression *>;
1881 
1882 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
1883 /// possibly moving/undefing users to prevent use-before-def. Returns true if
1884 /// changes are made.
1885 static bool rewriteDebugUsers(
1886     Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
1887     function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
1888   // Find debug users of From.
1889   SmallVector<DbgVariableIntrinsic *, 1> Users;
1890   findDbgUsers(Users, &From);
1891   if (Users.empty())
1892     return false;
1893 
1894   // Prevent use-before-def of To.
1895   bool Changed = false;
1896   SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
1897   if (isa<Instruction>(&To)) {
1898     bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
1899 
1900     for (auto *DII : Users) {
1901       // It's common to see a debug user between From and DomPoint. Move it
1902       // after DomPoint to preserve the variable update without any reordering.
1903       if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
1904         LLVM_DEBUG(dbgs() << "MOVE:  " << *DII << '\n');
1905         DII->moveAfter(&DomPoint);
1906         Changed = true;
1907 
1908       // Users which otherwise aren't dominated by the replacement value must
1909       // be salvaged or deleted.
1910       } else if (!DT.dominates(&DomPoint, DII)) {
1911         UndefOrSalvage.insert(DII);
1912       }
1913     }
1914   }
1915 
1916   // Update debug users without use-before-def risk.
1917   for (auto *DII : Users) {
1918     if (UndefOrSalvage.count(DII))
1919       continue;
1920 
1921     DbgValReplacement DVR = RewriteExpr(*DII);
1922     if (!DVR)
1923       continue;
1924 
1925     DII->replaceVariableLocationOp(&From, &To);
1926     DII->setExpression(*DVR);
1927     LLVM_DEBUG(dbgs() << "REWRITE:  " << *DII << '\n');
1928     Changed = true;
1929   }
1930 
1931   if (!UndefOrSalvage.empty()) {
1932     // Try to salvage the remaining debug users.
1933     salvageDebugInfo(From);
1934     Changed = true;
1935   }
1936 
1937   return Changed;
1938 }
1939 
1940 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
1941 /// losslessly preserve the bits and semantics of the value. This predicate is
1942 /// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result.
1943 ///
1944 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
1945 /// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
1946 /// and also does not allow lossless pointer <-> integer conversions.
1947 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
1948                                          Type *ToTy) {
1949   // Trivially compatible types.
1950   if (FromTy == ToTy)
1951     return true;
1952 
1953   // Handle compatible pointer <-> integer conversions.
1954   if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
1955     bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
1956     bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
1957                               !DL.isNonIntegralPointerType(ToTy);
1958     return SameSize && LosslessConversion;
1959   }
1960 
1961   // TODO: This is not exhaustive.
1962   return false;
1963 }
1964 
1965 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
1966                                  Instruction &DomPoint, DominatorTree &DT) {
1967   // Exit early if From has no debug users.
1968   if (!From.isUsedByMetadata())
1969     return false;
1970 
1971   assert(&From != &To && "Can't replace something with itself");
1972 
1973   Type *FromTy = From.getType();
1974   Type *ToTy = To.getType();
1975 
1976   auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
1977     return DII.getExpression();
1978   };
1979 
1980   // Handle no-op conversions.
1981   Module &M = *From.getModule();
1982   const DataLayout &DL = M.getDataLayout();
1983   if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
1984     return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1985 
1986   // Handle integer-to-integer widening and narrowing.
1987   // FIXME: Use DW_OP_convert when it's available everywhere.
1988   if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
1989     uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
1990     uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
1991     assert(FromBits != ToBits && "Unexpected no-op conversion");
1992 
1993     // When the width of the result grows, assume that a debugger will only
1994     // access the low `FromBits` bits when inspecting the source variable.
1995     if (FromBits < ToBits)
1996       return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
1997 
1998     // The width of the result has shrunk. Use sign/zero extension to describe
1999     // the source variable's high bits.
2000     auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2001       DILocalVariable *Var = DII.getVariable();
2002 
2003       // Without knowing signedness, sign/zero extension isn't possible.
2004       auto Signedness = Var->getSignedness();
2005       if (!Signedness)
2006         return None;
2007 
2008       bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2009       return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2010                                      Signed);
2011     };
2012     return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
2013   }
2014 
2015   // TODO: Floating-point conversions, vectors.
2016   return false;
2017 }
2018 
2019 std::pair<unsigned, unsigned>
2020 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2021   unsigned NumDeadInst = 0;
2022   unsigned NumDeadDbgInst = 0;
2023   // Delete the instructions backwards, as it has a reduced likelihood of
2024   // having to update as many def-use and use-def chains.
2025   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2026   while (EndInst != &BB->front()) {
2027     // Delete the next to last instruction.
2028     Instruction *Inst = &*--EndInst->getIterator();
2029     if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2030       Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
2031     if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2032       EndInst = Inst;
2033       continue;
2034     }
2035     if (isa<DbgInfoIntrinsic>(Inst))
2036       ++NumDeadDbgInst;
2037     else
2038       ++NumDeadInst;
2039     Inst->eraseFromParent();
2040   }
2041   return {NumDeadInst, NumDeadDbgInst};
2042 }
2043 
2044 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
2045                                    bool PreserveLCSSA, DomTreeUpdater *DTU,
2046                                    MemorySSAUpdater *MSSAU) {
2047   BasicBlock *BB = I->getParent();
2048 
2049   if (MSSAU)
2050     MSSAU->changeToUnreachable(I);
2051 
2052   SmallSet<BasicBlock *, 8> UniqueSuccessors;
2053 
2054   // Loop over all of the successors, removing BB's entry from any PHI
2055   // nodes.
2056   for (BasicBlock *Successor : successors(BB)) {
2057     Successor->removePredecessor(BB, PreserveLCSSA);
2058     if (DTU)
2059       UniqueSuccessors.insert(Successor);
2060   }
2061   // Insert a call to llvm.trap right before this.  This turns the undefined
2062   // behavior into a hard fail instead of falling through into random code.
2063   if (UseLLVMTrap) {
2064     Function *TrapFn =
2065       Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
2066     CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
2067     CallTrap->setDebugLoc(I->getDebugLoc());
2068   }
2069   auto *UI = new UnreachableInst(I->getContext(), I);
2070   UI->setDebugLoc(I->getDebugLoc());
2071 
2072   // All instructions after this are dead.
2073   unsigned NumInstrsRemoved = 0;
2074   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2075   while (BBI != BBE) {
2076     if (!BBI->use_empty())
2077       BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
2078     BB->getInstList().erase(BBI++);
2079     ++NumInstrsRemoved;
2080   }
2081   if (DTU) {
2082     SmallVector<DominatorTree::UpdateType, 8> Updates;
2083     Updates.reserve(UniqueSuccessors.size());
2084     for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2085       Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2086     DTU->applyUpdates(Updates);
2087   }
2088   return NumInstrsRemoved;
2089 }
2090 
2091 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2092   SmallVector<Value *, 8> Args(II->args());
2093   SmallVector<OperandBundleDef, 1> OpBundles;
2094   II->getOperandBundlesAsDefs(OpBundles);
2095   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2096                                        II->getCalledOperand(), Args, OpBundles);
2097   NewCall->setCallingConv(II->getCallingConv());
2098   NewCall->setAttributes(II->getAttributes());
2099   NewCall->setDebugLoc(II->getDebugLoc());
2100   NewCall->copyMetadata(*II);
2101 
2102   // If the invoke had profile metadata, try converting them for CallInst.
2103   uint64_t TotalWeight;
2104   if (NewCall->extractProfTotalWeight(TotalWeight)) {
2105     // Set the total weight if it fits into i32, otherwise reset.
2106     MDBuilder MDB(NewCall->getContext());
2107     auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2108                           ? nullptr
2109                           : MDB.createBranchWeights({uint32_t(TotalWeight)});
2110     NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2111   }
2112 
2113   return NewCall;
2114 }
2115 
2116 /// changeToCall - Convert the specified invoke into a normal call.
2117 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2118   CallInst *NewCall = createCallMatchingInvoke(II);
2119   NewCall->takeName(II);
2120   NewCall->insertBefore(II);
2121   II->replaceAllUsesWith(NewCall);
2122 
2123   // Follow the call by a branch to the normal destination.
2124   BasicBlock *NormalDestBB = II->getNormalDest();
2125   BranchInst::Create(NormalDestBB, II);
2126 
2127   // Update PHI nodes in the unwind destination
2128   BasicBlock *BB = II->getParent();
2129   BasicBlock *UnwindDestBB = II->getUnwindDest();
2130   UnwindDestBB->removePredecessor(BB);
2131   II->eraseFromParent();
2132   if (DTU)
2133     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2134 }
2135 
2136 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2137                                                    BasicBlock *UnwindEdge,
2138                                                    DomTreeUpdater *DTU) {
2139   BasicBlock *BB = CI->getParent();
2140 
2141   // Convert this function call into an invoke instruction.  First, split the
2142   // basic block.
2143   BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr,
2144                                  CI->getName() + ".noexc");
2145 
2146   // Delete the unconditional branch inserted by SplitBlock
2147   BB->getInstList().pop_back();
2148 
2149   // Create the new invoke instruction.
2150   SmallVector<Value *, 8> InvokeArgs(CI->args());
2151   SmallVector<OperandBundleDef, 1> OpBundles;
2152 
2153   CI->getOperandBundlesAsDefs(OpBundles);
2154 
2155   // Note: we're round tripping operand bundles through memory here, and that
2156   // can potentially be avoided with a cleverer API design that we do not have
2157   // as of this time.
2158 
2159   InvokeInst *II =
2160       InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2161                          UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2162   II->setDebugLoc(CI->getDebugLoc());
2163   II->setCallingConv(CI->getCallingConv());
2164   II->setAttributes(CI->getAttributes());
2165 
2166   if (DTU)
2167     DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2168 
2169   // Make sure that anything using the call now uses the invoke!  This also
2170   // updates the CallGraph if present, because it uses a WeakTrackingVH.
2171   CI->replaceAllUsesWith(II);
2172 
2173   // Delete the original call
2174   Split->getInstList().pop_front();
2175   return Split;
2176 }
2177 
2178 static bool markAliveBlocks(Function &F,
2179                             SmallPtrSetImpl<BasicBlock *> &Reachable,
2180                             DomTreeUpdater *DTU = nullptr) {
2181   SmallVector<BasicBlock*, 128> Worklist;
2182   BasicBlock *BB = &F.front();
2183   Worklist.push_back(BB);
2184   Reachable.insert(BB);
2185   bool Changed = false;
2186   do {
2187     BB = Worklist.pop_back_val();
2188 
2189     // Do a quick scan of the basic block, turning any obviously unreachable
2190     // instructions into LLVM unreachable insts.  The instruction combining pass
2191     // canonicalizes unreachable insts into stores to null or undef.
2192     for (Instruction &I : *BB) {
2193       if (auto *CI = dyn_cast<CallInst>(&I)) {
2194         Value *Callee = CI->getCalledOperand();
2195         // Handle intrinsic calls.
2196         if (Function *F = dyn_cast<Function>(Callee)) {
2197           auto IntrinsicID = F->getIntrinsicID();
2198           // Assumptions that are known to be false are equivalent to
2199           // unreachable. Also, if the condition is undefined, then we make the
2200           // choice most beneficial to the optimizer, and choose that to also be
2201           // unreachable.
2202           if (IntrinsicID == Intrinsic::assume) {
2203             if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2204               // Don't insert a call to llvm.trap right before the unreachable.
2205               changeToUnreachable(CI, false, false, DTU);
2206               Changed = true;
2207               break;
2208             }
2209           } else if (IntrinsicID == Intrinsic::experimental_guard) {
2210             // A call to the guard intrinsic bails out of the current
2211             // compilation unit if the predicate passed to it is false. If the
2212             // predicate is a constant false, then we know the guard will bail
2213             // out of the current compile unconditionally, so all code following
2214             // it is dead.
2215             //
2216             // Note: unlike in llvm.assume, it is not "obviously profitable" for
2217             // guards to treat `undef` as `false` since a guard on `undef` can
2218             // still be useful for widening.
2219             if (match(CI->getArgOperand(0), m_Zero()))
2220               if (!isa<UnreachableInst>(CI->getNextNode())) {
2221                 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false,
2222                                     false, DTU);
2223                 Changed = true;
2224                 break;
2225               }
2226           }
2227         } else if ((isa<ConstantPointerNull>(Callee) &&
2228                     !NullPointerIsDefined(CI->getFunction())) ||
2229                    isa<UndefValue>(Callee)) {
2230           changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU);
2231           Changed = true;
2232           break;
2233         }
2234         if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2235           // If we found a call to a no-return function, insert an unreachable
2236           // instruction after it.  Make sure there isn't *already* one there
2237           // though.
2238           if (!isa<UnreachableInst>(CI->getNextNode())) {
2239             // Don't insert a call to llvm.trap right before the unreachable.
2240             changeToUnreachable(CI->getNextNode(), false, false, DTU);
2241             Changed = true;
2242           }
2243           break;
2244         }
2245       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2246         // Store to undef and store to null are undefined and used to signal
2247         // that they should be changed to unreachable by passes that can't
2248         // modify the CFG.
2249 
2250         // Don't touch volatile stores.
2251         if (SI->isVolatile()) continue;
2252 
2253         Value *Ptr = SI->getOperand(1);
2254 
2255         if (isa<UndefValue>(Ptr) ||
2256             (isa<ConstantPointerNull>(Ptr) &&
2257              !NullPointerIsDefined(SI->getFunction(),
2258                                    SI->getPointerAddressSpace()))) {
2259           changeToUnreachable(SI, true, false, DTU);
2260           Changed = true;
2261           break;
2262         }
2263       }
2264     }
2265 
2266     Instruction *Terminator = BB->getTerminator();
2267     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2268       // Turn invokes that call 'nounwind' functions into ordinary calls.
2269       Value *Callee = II->getCalledOperand();
2270       if ((isa<ConstantPointerNull>(Callee) &&
2271            !NullPointerIsDefined(BB->getParent())) ||
2272           isa<UndefValue>(Callee)) {
2273         changeToUnreachable(II, true, false, DTU);
2274         Changed = true;
2275       } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2276         if (II->use_empty() && II->onlyReadsMemory()) {
2277           // jump to the normal destination branch.
2278           BasicBlock *NormalDestBB = II->getNormalDest();
2279           BasicBlock *UnwindDestBB = II->getUnwindDest();
2280           BranchInst::Create(NormalDestBB, II);
2281           UnwindDestBB->removePredecessor(II->getParent());
2282           II->eraseFromParent();
2283           if (DTU)
2284             DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2285         } else
2286           changeToCall(II, DTU);
2287         Changed = true;
2288       }
2289     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2290       // Remove catchpads which cannot be reached.
2291       struct CatchPadDenseMapInfo {
2292         static CatchPadInst *getEmptyKey() {
2293           return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2294         }
2295 
2296         static CatchPadInst *getTombstoneKey() {
2297           return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2298         }
2299 
2300         static unsigned getHashValue(CatchPadInst *CatchPad) {
2301           return static_cast<unsigned>(hash_combine_range(
2302               CatchPad->value_op_begin(), CatchPad->value_op_end()));
2303         }
2304 
2305         static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2306           if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2307               RHS == getEmptyKey() || RHS == getTombstoneKey())
2308             return LHS == RHS;
2309           return LHS->isIdenticalTo(RHS);
2310         }
2311       };
2312 
2313       SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2314       // Set of unique CatchPads.
2315       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2316                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2317           HandlerSet;
2318       detail::DenseSetEmpty Empty;
2319       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2320                                              E = CatchSwitch->handler_end();
2321            I != E; ++I) {
2322         BasicBlock *HandlerBB = *I;
2323         if (DTU)
2324           ++NumPerSuccessorCases[HandlerBB];
2325         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2326         if (!HandlerSet.insert({CatchPad, Empty}).second) {
2327           if (DTU)
2328             --NumPerSuccessorCases[HandlerBB];
2329           CatchSwitch->removeHandler(I);
2330           --I;
2331           --E;
2332           Changed = true;
2333         }
2334       }
2335       if (DTU) {
2336         std::vector<DominatorTree::UpdateType> Updates;
2337         for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2338           if (I.second == 0)
2339             Updates.push_back({DominatorTree::Delete, BB, I.first});
2340         DTU->applyUpdates(Updates);
2341       }
2342     }
2343 
2344     Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2345     for (BasicBlock *Successor : successors(BB))
2346       if (Reachable.insert(Successor).second)
2347         Worklist.push_back(Successor);
2348   } while (!Worklist.empty());
2349   return Changed;
2350 }
2351 
2352 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2353   Instruction *TI = BB->getTerminator();
2354 
2355   if (auto *II = dyn_cast<InvokeInst>(TI)) {
2356     changeToCall(II, DTU);
2357     return;
2358   }
2359 
2360   Instruction *NewTI;
2361   BasicBlock *UnwindDest;
2362 
2363   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2364     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2365     UnwindDest = CRI->getUnwindDest();
2366   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2367     auto *NewCatchSwitch = CatchSwitchInst::Create(
2368         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2369         CatchSwitch->getName(), CatchSwitch);
2370     for (BasicBlock *PadBB : CatchSwitch->handlers())
2371       NewCatchSwitch->addHandler(PadBB);
2372 
2373     NewTI = NewCatchSwitch;
2374     UnwindDest = CatchSwitch->getUnwindDest();
2375   } else {
2376     llvm_unreachable("Could not find unwind successor");
2377   }
2378 
2379   NewTI->takeName(TI);
2380   NewTI->setDebugLoc(TI->getDebugLoc());
2381   UnwindDest->removePredecessor(BB);
2382   TI->replaceAllUsesWith(NewTI);
2383   TI->eraseFromParent();
2384   if (DTU)
2385     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2386 }
2387 
2388 /// removeUnreachableBlocks - Remove blocks that are not reachable, even
2389 /// if they are in a dead cycle.  Return true if a change was made, false
2390 /// otherwise.
2391 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2392                                    MemorySSAUpdater *MSSAU) {
2393   SmallPtrSet<BasicBlock *, 16> Reachable;
2394   bool Changed = markAliveBlocks(F, Reachable, DTU);
2395 
2396   // If there are unreachable blocks in the CFG...
2397   if (Reachable.size() == F.size())
2398     return Changed;
2399 
2400   assert(Reachable.size() < F.size());
2401 
2402   // Are there any blocks left to actually delete?
2403   SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2404   for (BasicBlock &BB : F) {
2405     // Skip reachable basic blocks
2406     if (Reachable.count(&BB))
2407       continue;
2408     // Skip already-deleted blocks
2409     if (DTU && DTU->isBBPendingDeletion(&BB))
2410       continue;
2411     BlocksToRemove.insert(&BB);
2412   }
2413 
2414   if (BlocksToRemove.empty())
2415     return Changed;
2416 
2417   Changed = true;
2418   NumRemoved += BlocksToRemove.size();
2419 
2420   if (MSSAU)
2421     MSSAU->removeBlocks(BlocksToRemove);
2422 
2423   // Loop over all of the basic blocks that are up for removal, dropping all of
2424   // their internal references. Update DTU if available.
2425   std::vector<DominatorTree::UpdateType> Updates;
2426   for (auto *BB : BlocksToRemove) {
2427     SmallSet<BasicBlock *, 8> UniqueSuccessors;
2428     for (BasicBlock *Successor : successors(BB)) {
2429       // Only remove references to BB in reachable successors of BB.
2430       if (Reachable.count(Successor))
2431         Successor->removePredecessor(BB);
2432       if (DTU)
2433         UniqueSuccessors.insert(Successor);
2434     }
2435     BB->dropAllReferences();
2436     if (DTU) {
2437       Instruction *TI = BB->getTerminator();
2438       assert(TI && "Basic block should have a terminator");
2439       // Terminators like invoke can have users. We have to replace their users,
2440       // before removing them.
2441       if (!TI->use_empty())
2442         TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
2443       TI->eraseFromParent();
2444       new UnreachableInst(BB->getContext(), BB);
2445       assert(succ_empty(BB) && "The successor list of BB isn't empty before "
2446                                "applying corresponding DTU updates.");
2447       Updates.reserve(Updates.size() + UniqueSuccessors.size());
2448       for (auto *UniqueSuccessor : UniqueSuccessors)
2449         Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2450     }
2451   }
2452 
2453   if (DTU) {
2454     DTU->applyUpdates(Updates);
2455     for (auto *BB : BlocksToRemove)
2456       DTU->deleteBB(BB);
2457   } else {
2458     for (auto *BB : BlocksToRemove)
2459       BB->eraseFromParent();
2460   }
2461 
2462   return Changed;
2463 }
2464 
2465 void llvm::combineMetadata(Instruction *K, const Instruction *J,
2466                            ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
2467   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2468   K->dropUnknownNonDebugMetadata(KnownIDs);
2469   K->getAllMetadataOtherThanDebugLoc(Metadata);
2470   for (const auto &MD : Metadata) {
2471     unsigned Kind = MD.first;
2472     MDNode *JMD = J->getMetadata(Kind);
2473     MDNode *KMD = MD.second;
2474 
2475     switch (Kind) {
2476       default:
2477         K->setMetadata(Kind, nullptr); // Remove unknown metadata
2478         break;
2479       case LLVMContext::MD_dbg:
2480         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2481       case LLVMContext::MD_tbaa:
2482         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2483         break;
2484       case LLVMContext::MD_alias_scope:
2485         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2486         break;
2487       case LLVMContext::MD_noalias:
2488       case LLVMContext::MD_mem_parallel_loop_access:
2489         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2490         break;
2491       case LLVMContext::MD_access_group:
2492         K->setMetadata(LLVMContext::MD_access_group,
2493                        intersectAccessGroups(K, J));
2494         break;
2495       case LLVMContext::MD_range:
2496 
2497         // If K does move, use most generic range. Otherwise keep the range of
2498         // K.
2499         if (DoesKMove)
2500           // FIXME: If K does move, we should drop the range info and nonnull.
2501           //        Currently this function is used with DoesKMove in passes
2502           //        doing hoisting/sinking and the current behavior of using the
2503           //        most generic range is correct in those cases.
2504           K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2505         break;
2506       case LLVMContext::MD_fpmath:
2507         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2508         break;
2509       case LLVMContext::MD_invariant_load:
2510         // Only set the !invariant.load if it is present in both instructions.
2511         K->setMetadata(Kind, JMD);
2512         break;
2513       case LLVMContext::MD_nonnull:
2514         // If K does move, keep nonull if it is present in both instructions.
2515         if (DoesKMove)
2516           K->setMetadata(Kind, JMD);
2517         break;
2518       case LLVMContext::MD_invariant_group:
2519         // Preserve !invariant.group in K.
2520         break;
2521       case LLVMContext::MD_align:
2522         K->setMetadata(Kind,
2523           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2524         break;
2525       case LLVMContext::MD_dereferenceable:
2526       case LLVMContext::MD_dereferenceable_or_null:
2527         K->setMetadata(Kind,
2528           MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2529         break;
2530       case LLVMContext::MD_preserve_access_index:
2531         // Preserve !preserve.access.index in K.
2532         break;
2533     }
2534   }
2535   // Set !invariant.group from J if J has it. If both instructions have it
2536   // then we will just pick it from J - even when they are different.
2537   // Also make sure that K is load or store - f.e. combining bitcast with load
2538   // could produce bitcast with invariant.group metadata, which is invalid.
2539   // FIXME: we should try to preserve both invariant.group md if they are
2540   // different, but right now instruction can only have one invariant.group.
2541   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2542     if (isa<LoadInst>(K) || isa<StoreInst>(K))
2543       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2544 }
2545 
2546 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2547                                  bool KDominatesJ) {
2548   unsigned KnownIDs[] = {
2549       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2550       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2551       LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
2552       LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2553       LLVMContext::MD_dereferenceable,
2554       LLVMContext::MD_dereferenceable_or_null,
2555       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2556   combineMetadata(K, J, KnownIDs, KDominatesJ);
2557 }
2558 
2559 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2560   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2561   Source.getAllMetadata(MD);
2562   MDBuilder MDB(Dest.getContext());
2563   Type *NewType = Dest.getType();
2564   const DataLayout &DL = Source.getModule()->getDataLayout();
2565   for (const auto &MDPair : MD) {
2566     unsigned ID = MDPair.first;
2567     MDNode *N = MDPair.second;
2568     // Note, essentially every kind of metadata should be preserved here! This
2569     // routine is supposed to clone a load instruction changing *only its type*.
2570     // The only metadata it makes sense to drop is metadata which is invalidated
2571     // when the pointer type changes. This should essentially never be the case
2572     // in LLVM, but we explicitly switch over only known metadata to be
2573     // conservatively correct. If you are adding metadata to LLVM which pertains
2574     // to loads, you almost certainly want to add it here.
2575     switch (ID) {
2576     case LLVMContext::MD_dbg:
2577     case LLVMContext::MD_tbaa:
2578     case LLVMContext::MD_prof:
2579     case LLVMContext::MD_fpmath:
2580     case LLVMContext::MD_tbaa_struct:
2581     case LLVMContext::MD_invariant_load:
2582     case LLVMContext::MD_alias_scope:
2583     case LLVMContext::MD_noalias:
2584     case LLVMContext::MD_nontemporal:
2585     case LLVMContext::MD_mem_parallel_loop_access:
2586     case LLVMContext::MD_access_group:
2587       // All of these directly apply.
2588       Dest.setMetadata(ID, N);
2589       break;
2590 
2591     case LLVMContext::MD_nonnull:
2592       copyNonnullMetadata(Source, N, Dest);
2593       break;
2594 
2595     case LLVMContext::MD_align:
2596     case LLVMContext::MD_dereferenceable:
2597     case LLVMContext::MD_dereferenceable_or_null:
2598       // These only directly apply if the new type is also a pointer.
2599       if (NewType->isPointerTy())
2600         Dest.setMetadata(ID, N);
2601       break;
2602 
2603     case LLVMContext::MD_range:
2604       copyRangeMetadata(DL, Source, N, Dest);
2605       break;
2606     }
2607   }
2608 }
2609 
2610 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2611   auto *ReplInst = dyn_cast<Instruction>(Repl);
2612   if (!ReplInst)
2613     return;
2614 
2615   // Patch the replacement so that it is not more restrictive than the value
2616   // being replaced.
2617   // Note that if 'I' is a load being replaced by some operation,
2618   // for example, by an arithmetic operation, then andIRFlags()
2619   // would just erase all math flags from the original arithmetic
2620   // operation, which is clearly not wanted and not needed.
2621   if (!isa<LoadInst>(I))
2622     ReplInst->andIRFlags(I);
2623 
2624   // FIXME: If both the original and replacement value are part of the
2625   // same control-flow region (meaning that the execution of one
2626   // guarantees the execution of the other), then we can combine the
2627   // noalias scopes here and do better than the general conservative
2628   // answer used in combineMetadata().
2629 
2630   // In general, GVN unifies expressions over different control-flow
2631   // regions, and so we need a conservative combination of the noalias
2632   // scopes.
2633   static const unsigned KnownIDs[] = {
2634       LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
2635       LLVMContext::MD_noalias,         LLVMContext::MD_range,
2636       LLVMContext::MD_fpmath,          LLVMContext::MD_invariant_load,
2637       LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull,
2638       LLVMContext::MD_access_group,    LLVMContext::MD_preserve_access_index};
2639   combineMetadata(ReplInst, I, KnownIDs, false);
2640 }
2641 
2642 template <typename RootType, typename DominatesFn>
2643 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2644                                          const RootType &Root,
2645                                          const DominatesFn &Dominates) {
2646   assert(From->getType() == To->getType());
2647 
2648   unsigned Count = 0;
2649   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2650        UI != UE;) {
2651     Use &U = *UI++;
2652     if (!Dominates(Root, U))
2653       continue;
2654     U.set(To);
2655     LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
2656                       << "' as " << *To << " in " << *U << "\n");
2657     ++Count;
2658   }
2659   return Count;
2660 }
2661 
2662 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
2663    assert(From->getType() == To->getType());
2664    auto *BB = From->getParent();
2665    unsigned Count = 0;
2666 
2667   for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
2668        UI != UE;) {
2669     Use &U = *UI++;
2670     auto *I = cast<Instruction>(U.getUser());
2671     if (I->getParent() == BB)
2672       continue;
2673     U.set(To);
2674     ++Count;
2675   }
2676   return Count;
2677 }
2678 
2679 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2680                                         DominatorTree &DT,
2681                                         const BasicBlockEdge &Root) {
2682   auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2683     return DT.dominates(Root, U);
2684   };
2685   return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2686 }
2687 
2688 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2689                                         DominatorTree &DT,
2690                                         const BasicBlock *BB) {
2691   auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
2692     return DT.dominates(BB, U);
2693   };
2694   return ::replaceDominatedUsesWith(From, To, BB, Dominates);
2695 }
2696 
2697 bool llvm::callsGCLeafFunction(const CallBase *Call,
2698                                const TargetLibraryInfo &TLI) {
2699   // Check if the function is specifically marked as a gc leaf function.
2700   if (Call->hasFnAttr("gc-leaf-function"))
2701     return true;
2702   if (const Function *F = Call->getCalledFunction()) {
2703     if (F->hasFnAttribute("gc-leaf-function"))
2704       return true;
2705 
2706     if (auto IID = F->getIntrinsicID()) {
2707       // Most LLVM intrinsics do not take safepoints.
2708       return IID != Intrinsic::experimental_gc_statepoint &&
2709              IID != Intrinsic::experimental_deoptimize &&
2710              IID != Intrinsic::memcpy_element_unordered_atomic &&
2711              IID != Intrinsic::memmove_element_unordered_atomic;
2712     }
2713   }
2714 
2715   // Lib calls can be materialized by some passes, and won't be
2716   // marked as 'gc-leaf-function.' All available Libcalls are
2717   // GC-leaf.
2718   LibFunc LF;
2719   if (TLI.getLibFunc(*Call, LF)) {
2720     return TLI.has(LF);
2721   }
2722 
2723   return false;
2724 }
2725 
2726 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2727                                LoadInst &NewLI) {
2728   auto *NewTy = NewLI.getType();
2729 
2730   // This only directly applies if the new type is also a pointer.
2731   if (NewTy->isPointerTy()) {
2732     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2733     return;
2734   }
2735 
2736   // The only other translation we can do is to integral loads with !range
2737   // metadata.
2738   if (!NewTy->isIntegerTy())
2739     return;
2740 
2741   MDBuilder MDB(NewLI.getContext());
2742   const Value *Ptr = OldLI.getPointerOperand();
2743   auto *ITy = cast<IntegerType>(NewTy);
2744   auto *NullInt = ConstantExpr::getPtrToInt(
2745       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2746   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2747   NewLI.setMetadata(LLVMContext::MD_range,
2748                     MDB.createRange(NonNullInt, NullInt));
2749 }
2750 
2751 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2752                              MDNode *N, LoadInst &NewLI) {
2753   auto *NewTy = NewLI.getType();
2754 
2755   // Give up unless it is converted to a pointer where there is a single very
2756   // valuable mapping we can do reliably.
2757   // FIXME: It would be nice to propagate this in more ways, but the type
2758   // conversions make it hard.
2759   if (!NewTy->isPointerTy())
2760     return;
2761 
2762   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
2763   if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2764     MDNode *NN = MDNode::get(OldLI.getContext(), None);
2765     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2766   }
2767 }
2768 
2769 void llvm::dropDebugUsers(Instruction &I) {
2770   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2771   findDbgUsers(DbgUsers, &I);
2772   for (auto *DII : DbgUsers)
2773     DII->eraseFromParent();
2774 }
2775 
2776 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2777                                     BasicBlock *BB) {
2778   // Since we are moving the instructions out of its basic block, we do not
2779   // retain their original debug locations (DILocations) and debug intrinsic
2780   // instructions.
2781   //
2782   // Doing so would degrade the debugging experience and adversely affect the
2783   // accuracy of profiling information.
2784   //
2785   // Currently, when hoisting the instructions, we take the following actions:
2786   // - Remove their debug intrinsic instructions.
2787   // - Set their debug locations to the values from the insertion point.
2788   //
2789   // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2790   // need to be deleted, is because there will not be any instructions with a
2791   // DILocation in either branch left after performing the transformation. We
2792   // can only insert a dbg.value after the two branches are joined again.
2793   //
2794   // See PR38762, PR39243 for more details.
2795   //
2796   // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2797   // encode predicated DIExpressions that yield different results on different
2798   // code paths.
2799 
2800   // A hoisted conditional probe should be treated as dangling so that it will
2801   // not be over-counted when the samples collected on the non-conditional path
2802   // are counted towards the conditional path. We leave it for the counts
2803   // inference algorithm to figure out a proper count for a danglng probe.
2804   moveAndDanglePseudoProbes(BB, InsertPt);
2805 
2806   for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2807     Instruction *I = &*II;
2808     I->dropUnknownNonDebugMetadata();
2809     if (I->isUsedByMetadata())
2810       dropDebugUsers(*I);
2811     if (isa<DbgInfoIntrinsic>(I)) {
2812       // Remove DbgInfo Intrinsics.
2813       II = I->eraseFromParent();
2814       continue;
2815     }
2816     I->setDebugLoc(InsertPt->getDebugLoc());
2817     ++II;
2818   }
2819   DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2820                                  BB->begin(),
2821                                  BB->getTerminator()->getIterator());
2822 }
2823 
2824 namespace {
2825 
2826 /// A potential constituent of a bitreverse or bswap expression. See
2827 /// collectBitParts for a fuller explanation.
2828 struct BitPart {
2829   BitPart(Value *P, unsigned BW) : Provider(P) {
2830     Provenance.resize(BW);
2831   }
2832 
2833   /// The Value that this is a bitreverse/bswap of.
2834   Value *Provider;
2835 
2836   /// The "provenance" of each bit. Provenance[A] = B means that bit A
2837   /// in Provider becomes bit B in the result of this expression.
2838   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2839 
2840   enum { Unset = -1 };
2841 };
2842 
2843 } // end anonymous namespace
2844 
2845 /// Analyze the specified subexpression and see if it is capable of providing
2846 /// pieces of a bswap or bitreverse. The subexpression provides a potential
2847 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
2848 /// the output of the expression came from a corresponding bit in some other
2849 /// value. This function is recursive, and the end result is a mapping of
2850 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
2851 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2852 ///
2853 /// For example, if the current subexpression if "(shl i32 %X, 24)" then we know
2854 /// that the expression deposits the low byte of %X into the high byte of the
2855 /// result and that all other bits are zero. This expression is accepted and a
2856 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2857 /// [0-7].
2858 ///
2859 /// For vector types, all analysis is performed at the per-element level. No
2860 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
2861 /// constant masks must be splatted across all elements.
2862 ///
2863 /// To avoid revisiting values, the BitPart results are memoized into the
2864 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
2865 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
2866 /// store BitParts objects, not pointers. As we need the concept of a nullptr
2867 /// BitParts (Value has been analyzed and the analysis failed), we an Optional
2868 /// type instead to provide the same functionality.
2869 ///
2870 /// Because we pass around references into \c BPS, we must use a container that
2871 /// does not invalidate internal references (std::map instead of DenseMap).
2872 static const Optional<BitPart> &
2873 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2874                 std::map<Value *, Optional<BitPart>> &BPS, int Depth) {
2875   auto I = BPS.find(V);
2876   if (I != BPS.end())
2877     return I->second;
2878 
2879   auto &Result = BPS[V] = None;
2880   auto BitWidth = V->getType()->getScalarSizeInBits();
2881 
2882   // Can't do integer/elements > 128 bits.
2883   if (BitWidth > 128)
2884     return Result;
2885 
2886   // Prevent stack overflow by limiting the recursion depth
2887   if (Depth == BitPartRecursionMaxDepth) {
2888     LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
2889     return Result;
2890   }
2891 
2892   if (auto *I = dyn_cast<Instruction>(V)) {
2893     Value *X, *Y;
2894     const APInt *C;
2895 
2896     // If this is an or instruction, it may be an inner node of the bswap.
2897     if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
2898       const auto &A =
2899           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2900       const auto &B =
2901           collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2902       if (!A || !B)
2903         return Result;
2904 
2905       // Try and merge the two together.
2906       if (!A->Provider || A->Provider != B->Provider)
2907         return Result;
2908 
2909       Result = BitPart(A->Provider, BitWidth);
2910       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
2911         if (A->Provenance[BitIdx] != BitPart::Unset &&
2912             B->Provenance[BitIdx] != BitPart::Unset &&
2913             A->Provenance[BitIdx] != B->Provenance[BitIdx])
2914           return Result = None;
2915 
2916         if (A->Provenance[BitIdx] == BitPart::Unset)
2917           Result->Provenance[BitIdx] = B->Provenance[BitIdx];
2918         else
2919           Result->Provenance[BitIdx] = A->Provenance[BitIdx];
2920       }
2921 
2922       return Result;
2923     }
2924 
2925     // If this is a logical shift by a constant, recurse then shift the result.
2926     if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
2927       const APInt &BitShift = *C;
2928 
2929       // Ensure the shift amount is defined.
2930       if (BitShift.uge(BitWidth))
2931         return Result;
2932 
2933       const auto &Res =
2934           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2935       if (!Res)
2936         return Result;
2937       Result = Res;
2938 
2939       // Perform the "shift" on BitProvenance.
2940       auto &P = Result->Provenance;
2941       if (I->getOpcode() == Instruction::Shl) {
2942         P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
2943         P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
2944       } else {
2945         P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
2946         P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
2947       }
2948 
2949       return Result;
2950     }
2951 
2952     // If this is a logical 'and' with a mask that clears bits, recurse then
2953     // unset the appropriate bits.
2954     if (match(V, m_And(m_Value(X), m_APInt(C)))) {
2955       const APInt &AndMask = *C;
2956 
2957       // Check that the mask allows a multiple of 8 bits for a bswap, for an
2958       // early exit.
2959       unsigned NumMaskedBits = AndMask.countPopulation();
2960       if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
2961         return Result;
2962 
2963       const auto &Res =
2964           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2965       if (!Res)
2966         return Result;
2967       Result = Res;
2968 
2969       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
2970         // If the AndMask is zero for this bit, clear the bit.
2971         if (AndMask[BitIdx] == 0)
2972           Result->Provenance[BitIdx] = BitPart::Unset;
2973       return Result;
2974     }
2975 
2976     // If this is a zext instruction zero extend the result.
2977     if (match(V, m_ZExt(m_Value(X)))) {
2978       const auto &Res =
2979           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2980       if (!Res)
2981         return Result;
2982 
2983       Result = BitPart(Res->Provider, BitWidth);
2984       auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
2985       for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
2986         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
2987       for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
2988         Result->Provenance[BitIdx] = BitPart::Unset;
2989       return Result;
2990     }
2991 
2992     // If this is a truncate instruction, extract the lower bits.
2993     if (match(V, m_Trunc(m_Value(X)))) {
2994       const auto &Res =
2995           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
2996       if (!Res)
2997         return Result;
2998 
2999       Result = BitPart(Res->Provider, BitWidth);
3000       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3001         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3002       return Result;
3003     }
3004 
3005     // BITREVERSE - most likely due to us previous matching a partial
3006     // bitreverse.
3007     if (match(V, m_BitReverse(m_Value(X)))) {
3008       const auto &Res =
3009           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3010       if (!Res)
3011         return Result;
3012 
3013       Result = BitPart(Res->Provider, BitWidth);
3014       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3015         Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3016       return Result;
3017     }
3018 
3019     // BSWAP - most likely due to us previous matching a partial bswap.
3020     if (match(V, m_BSwap(m_Value(X)))) {
3021       const auto &Res =
3022           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3023       if (!Res)
3024         return Result;
3025 
3026       unsigned ByteWidth = BitWidth / 8;
3027       Result = BitPart(Res->Provider, BitWidth);
3028       for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3029         unsigned ByteBitOfs = ByteIdx * 8;
3030         for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3031           Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3032               Res->Provenance[ByteBitOfs + BitIdx];
3033       }
3034       return Result;
3035     }
3036 
3037     // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
3038     // amount (modulo).
3039     // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3040     // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
3041     if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3042         match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3043       // We can treat fshr as a fshl by flipping the modulo amount.
3044       unsigned ModAmt = C->urem(BitWidth);
3045       if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3046         ModAmt = BitWidth - ModAmt;
3047 
3048       const auto &LHS =
3049           collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3050       const auto &RHS =
3051           collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1);
3052 
3053       // Check we have both sources and they are from the same provider.
3054       if (!LHS || !RHS || !LHS->Provider || LHS->Provider != RHS->Provider)
3055         return Result;
3056 
3057       unsigned StartBitRHS = BitWidth - ModAmt;
3058       Result = BitPart(LHS->Provider, BitWidth);
3059       for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3060         Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3061       for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3062         Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3063       return Result;
3064     }
3065   }
3066 
3067   // Okay, we got to something that isn't a shift, 'or' or 'and'.  This must be
3068   // the input value to the bswap/bitreverse.
3069   Result = BitPart(V, BitWidth);
3070   for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3071     Result->Provenance[BitIdx] = BitIdx;
3072   return Result;
3073 }
3074 
3075 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3076                                           unsigned BitWidth) {
3077   if (From % 8 != To % 8)
3078     return false;
3079   // Convert from bit indices to byte indices and check for a byte reversal.
3080   From >>= 3;
3081   To >>= 3;
3082   BitWidth >>= 3;
3083   return From == BitWidth - To - 1;
3084 }
3085 
3086 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3087                                                unsigned BitWidth) {
3088   return From == BitWidth - To - 1;
3089 }
3090 
3091 bool llvm::recognizeBSwapOrBitReverseIdiom(
3092     Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3093     SmallVectorImpl<Instruction *> &InsertedInsts) {
3094   if (Operator::getOpcode(I) != Instruction::Or)
3095     return false;
3096   if (!MatchBSwaps && !MatchBitReversals)
3097     return false;
3098   Type *ITy = I->getType();
3099   if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3100     return false;  // Can't do integer/elements > 128 bits.
3101 
3102   Type *DemandedTy = ITy;
3103   if (I->hasOneUse())
3104     if (auto *Trunc = dyn_cast<TruncInst>(I->user_back()))
3105       DemandedTy = Trunc->getType();
3106 
3107   // Try to find all the pieces corresponding to the bswap.
3108   std::map<Value *, Optional<BitPart>> BPS;
3109   auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0);
3110   if (!Res)
3111     return false;
3112   ArrayRef<int8_t> BitProvenance = Res->Provenance;
3113   assert(all_of(BitProvenance,
3114                 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3115          "Illegal bit provenance index");
3116 
3117   // If the upper bits are zero, then attempt to perform as a truncated op.
3118   if (BitProvenance.back() == BitPart::Unset) {
3119     while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3120       BitProvenance = BitProvenance.drop_back();
3121     if (BitProvenance.empty())
3122       return false; // TODO - handle null value?
3123     DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3124     if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3125       DemandedTy = VectorType::get(DemandedTy, IVecTy);
3126   }
3127 
3128   // Check BitProvenance hasn't found a source larger than the result type.
3129   unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3130   if (DemandedBW > ITy->getScalarSizeInBits())
3131     return false;
3132 
3133   // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3134   // only byteswap values with an even number of bytes.
3135   APInt DemandedMask = APInt::getAllOnesValue(DemandedBW);
3136   bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3137   bool OKForBitReverse = MatchBitReversals;
3138   for (unsigned BitIdx = 0;
3139        (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3140     if (BitProvenance[BitIdx] == BitPart::Unset) {
3141       DemandedMask.clearBit(BitIdx);
3142       continue;
3143     }
3144     OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3145                                                 DemandedBW);
3146     OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3147                                                           BitIdx, DemandedBW);
3148   }
3149 
3150   Intrinsic::ID Intrin;
3151   if (OKForBSwap)
3152     Intrin = Intrinsic::bswap;
3153   else if (OKForBitReverse)
3154     Intrin = Intrinsic::bitreverse;
3155   else
3156     return false;
3157 
3158   Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
3159   Value *Provider = Res->Provider;
3160 
3161   // We may need to truncate the provider.
3162   if (DemandedTy != Provider->getType()) {
3163     auto *Trunc =
3164         CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
3165     InsertedInsts.push_back(Trunc);
3166     Provider = Trunc;
3167   }
3168 
3169   Instruction *Result = CallInst::Create(F, Provider, "rev", I);
3170   InsertedInsts.push_back(Result);
3171 
3172   if (!DemandedMask.isAllOnesValue()) {
3173     auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3174     Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
3175     InsertedInsts.push_back(Result);
3176   }
3177 
3178   // We may need to zeroextend back to the result type.
3179   if (ITy != Result->getType()) {
3180     auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
3181     InsertedInsts.push_back(ExtInst);
3182   }
3183 
3184   return true;
3185 }
3186 
3187 // CodeGen has special handling for some string functions that may replace
3188 // them with target-specific intrinsics.  Since that'd skip our interceptors
3189 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3190 // we mark affected calls as NoBuiltin, which will disable optimization
3191 // in CodeGen.
3192 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3193     CallInst *CI, const TargetLibraryInfo *TLI) {
3194   Function *F = CI->getCalledFunction();
3195   LibFunc Func;
3196   if (F && !F->hasLocalLinkage() && F->hasName() &&
3197       TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3198       !F->doesNotAccessMemory())
3199     CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
3200 }
3201 
3202 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
3203   // We can't have a PHI with a metadata type.
3204   if (I->getOperand(OpIdx)->getType()->isMetadataTy())
3205     return false;
3206 
3207   // Early exit.
3208   if (!isa<Constant>(I->getOperand(OpIdx)))
3209     return true;
3210 
3211   switch (I->getOpcode()) {
3212   default:
3213     return true;
3214   case Instruction::Call:
3215   case Instruction::Invoke: {
3216     const auto &CB = cast<CallBase>(*I);
3217 
3218     // Can't handle inline asm. Skip it.
3219     if (CB.isInlineAsm())
3220       return false;
3221 
3222     // Constant bundle operands may need to retain their constant-ness for
3223     // correctness.
3224     if (CB.isBundleOperand(OpIdx))
3225       return false;
3226 
3227     if (OpIdx < CB.getNumArgOperands()) {
3228       // Some variadic intrinsics require constants in the variadic arguments,
3229       // which currently aren't markable as immarg.
3230       if (isa<IntrinsicInst>(CB) &&
3231           OpIdx >= CB.getFunctionType()->getNumParams()) {
3232         // This is known to be OK for stackmap.
3233         return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3234       }
3235 
3236       // gcroot is a special case, since it requires a constant argument which
3237       // isn't also required to be a simple ConstantInt.
3238       if (CB.getIntrinsicID() == Intrinsic::gcroot)
3239         return false;
3240 
3241       // Some intrinsic operands are required to be immediates.
3242       return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3243     }
3244 
3245     // It is never allowed to replace the call argument to an intrinsic, but it
3246     // may be possible for a call.
3247     return !isa<IntrinsicInst>(CB);
3248   }
3249   case Instruction::ShuffleVector:
3250     // Shufflevector masks are constant.
3251     return OpIdx != 2;
3252   case Instruction::Switch:
3253   case Instruction::ExtractValue:
3254     // All operands apart from the first are constant.
3255     return OpIdx == 0;
3256   case Instruction::InsertValue:
3257     // All operands apart from the first and the second are constant.
3258     return OpIdx < 2;
3259   case Instruction::Alloca:
3260     // Static allocas (constant size in the entry block) are handled by
3261     // prologue/epilogue insertion so they're free anyway. We definitely don't
3262     // want to make them non-constant.
3263     return !cast<AllocaInst>(I)->isStaticAlloca();
3264   case Instruction::GetElementPtr:
3265     if (OpIdx == 0)
3266       return true;
3267     gep_type_iterator It = gep_type_begin(I);
3268     for (auto E = std::next(It, OpIdx); It != E; ++It)
3269       if (It.isStruct())
3270         return false;
3271     return true;
3272   }
3273 }
3274 
3275 Value *llvm::invertCondition(Value *Condition) {
3276   // First: Check if it's a constant
3277   if (Constant *C = dyn_cast<Constant>(Condition))
3278     return ConstantExpr::getNot(C);
3279 
3280   // Second: If the condition is already inverted, return the original value
3281   Value *NotCondition;
3282   if (match(Condition, m_Not(m_Value(NotCondition))))
3283     return NotCondition;
3284 
3285   BasicBlock *Parent = nullptr;
3286   Instruction *Inst = dyn_cast<Instruction>(Condition);
3287   if (Inst)
3288     Parent = Inst->getParent();
3289   else if (Argument *Arg = dyn_cast<Argument>(Condition))
3290     Parent = &Arg->getParent()->getEntryBlock();
3291   assert(Parent && "Unsupported condition to invert");
3292 
3293   // Third: Check all the users for an invert
3294   for (User *U : Condition->users())
3295     if (Instruction *I = dyn_cast<Instruction>(U))
3296       if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
3297         return I;
3298 
3299   // Last option: Create a new instruction
3300   auto *Inverted =
3301       BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
3302   if (Inst && !isa<PHINode>(Inst))
3303     Inverted->insertAfter(Inst);
3304   else
3305     Inverted->insertBefore(&*Parent->getFirstInsertionPt());
3306   return Inverted;
3307 }
3308 
3309 bool llvm::inferAttributesFromOthers(Function &F) {
3310   // Note: We explicitly check for attributes rather than using cover functions
3311   // because some of the cover functions include the logic being implemented.
3312 
3313   bool Changed = false;
3314   // readnone + not convergent implies nosync
3315   if (!F.hasFnAttribute(Attribute::NoSync) &&
3316       F.doesNotAccessMemory() && !F.isConvergent()) {
3317     F.setNoSync();
3318     Changed = true;
3319   }
3320 
3321   // readonly implies nofree
3322   if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
3323     F.setDoesNotFreeMemory();
3324     Changed = true;
3325   }
3326 
3327   // willreturn implies mustprogress
3328   if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
3329     F.setMustProgress();
3330     Changed = true;
3331   }
3332 
3333   // TODO: There are a bunch of cases of restrictive memory effects we
3334   // can infer by inspecting arguments of argmemonly-ish functions.
3335 
3336   return Changed;
3337 }
3338