1 //===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs global value numbering to eliminate fully redundant
10 // instructions.  It also performs simple dead load elimination.
11 //
12 // Note that this pass does the value numbering itself; it does not use the
13 // ValueNumbering analysis passes.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Transforms/Scalar/GVN.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/Hashing.h"
21 #include "llvm/ADT/MapVector.h"
22 #include "llvm/ADT/PointerIntPair.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/Analysis/AliasAnalysis.h"
30 #include "llvm/Analysis/AssumeBundleQueries.h"
31 #include "llvm/Analysis/AssumptionCache.h"
32 #include "llvm/Analysis/CFG.h"
33 #include "llvm/Analysis/DomTreeUpdater.h"
34 #include "llvm/Analysis/GlobalsModRef.h"
35 #include "llvm/Analysis/InstructionSimplify.h"
36 #include "llvm/Analysis/LoopInfo.h"
37 #include "llvm/Analysis/MemoryBuiltins.h"
38 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
39 #include "llvm/Analysis/MemorySSA.h"
40 #include "llvm/Analysis/MemorySSAUpdater.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/PHITransAddr.h"
43 #include "llvm/Analysis/TargetLibraryInfo.h"
44 #include "llvm/Analysis/ValueTracking.h"
45 #include "llvm/Config/llvm-config.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/Constant.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugLoc.h"
52 #include "llvm/IR/Dominators.h"
53 #include "llvm/IR/Function.h"
54 #include "llvm/IR/InstrTypes.h"
55 #include "llvm/IR/Instruction.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/Metadata.h"
61 #include "llvm/IR/Module.h"
62 #include "llvm/IR/Operator.h"
63 #include "llvm/IR/PassManager.h"
64 #include "llvm/IR/PatternMatch.h"
65 #include "llvm/IR/Statepoint.h"
66 #include "llvm/IR/Type.h"
67 #include "llvm/IR/Use.h"
68 #include "llvm/IR/Value.h"
69 #include "llvm/InitializePasses.h"
70 #include "llvm/Pass.h"
71 #include "llvm/Support/Casting.h"
72 #include "llvm/Support/CommandLine.h"
73 #include "llvm/Support/Compiler.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/raw_ostream.h"
76 #include "llvm/Transforms/Utils.h"
77 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
78 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
79 #include "llvm/Transforms/Utils/Local.h"
80 #include "llvm/Transforms/Utils/SSAUpdater.h"
81 #include "llvm/Transforms/Utils/VNCoercion.h"
82 #include <algorithm>
83 #include <cassert>
84 #include <cstdint>
85 #include <utility>
86 #include <vector>
87 
88 using namespace llvm;
89 using namespace llvm::gvn;
90 using namespace llvm::VNCoercion;
91 using namespace PatternMatch;
92 
93 #define DEBUG_TYPE "gvn"
94 
95 STATISTIC(NumGVNInstr,  "Number of instructions deleted");
96 STATISTIC(NumGVNLoad,   "Number of loads deleted");
97 STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
98 STATISTIC(NumGVNBlocks, "Number of blocks merged");
99 STATISTIC(NumGVNSimpl,  "Number of instructions simplified");
100 STATISTIC(NumGVNEqProp, "Number of equalities propagated");
101 STATISTIC(NumPRELoad,   "Number of loads PRE'd");
102 
103 STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
104           "Number of blocks speculated as available in "
105           "IsValueFullyAvailableInBlock(), max");
106 STATISTIC(MaxBBSpeculationCutoffReachedTimes,
107           "Number of times we we reached gvn-max-block-speculations cut-off "
108           "preventing further exploration");
109 
110 static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
111 static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
112 static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
113                                             cl::init(true));
114 static cl::opt<bool>
115 GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
116                                 cl::init(true));
117 static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
118 
119 static cl::opt<uint32_t> MaxNumDeps(
120     "gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore,
121     cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));
122 
123 // This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
124 static cl::opt<uint32_t> MaxBBSpeculations(
125     "gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::ZeroOrMore,
126     cl::desc("Max number of blocks we're willing to speculate on (and recurse "
127              "into) when deducing if a value is fully available or not in GVN "
128              "(default = 600)"));
129 
130 struct llvm::GVN::Expression {
131   uint32_t opcode;
132   bool commutative = false;
133   Type *type = nullptr;
134   SmallVector<uint32_t, 4> varargs;
135 
136   Expression(uint32_t o = ~2U) : opcode(o) {}
137 
138   bool operator==(const Expression &other) const {
139     if (opcode != other.opcode)
140       return false;
141     if (opcode == ~0U || opcode == ~1U)
142       return true;
143     if (type != other.type)
144       return false;
145     if (varargs != other.varargs)
146       return false;
147     return true;
148   }
149 
150   friend hash_code hash_value(const Expression &Value) {
151     return hash_combine(
152         Value.opcode, Value.type,
153         hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
154   }
155 };
156 
157 namespace llvm {
158 
159 template <> struct DenseMapInfo<GVN::Expression> {
160   static inline GVN::Expression getEmptyKey() { return ~0U; }
161   static inline GVN::Expression getTombstoneKey() { return ~1U; }
162 
163   static unsigned getHashValue(const GVN::Expression &e) {
164     using llvm::hash_value;
165 
166     return static_cast<unsigned>(hash_value(e));
167   }
168 
169   static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
170     return LHS == RHS;
171   }
172 };
173 
174 } // end namespace llvm
175 
176 /// Represents a particular available value that we know how to materialize.
177 /// Materialization of an AvailableValue never fails.  An AvailableValue is
178 /// implicitly associated with a rematerialization point which is the
179 /// location of the instruction from which it was formed.
180 struct llvm::gvn::AvailableValue {
181   enum ValType {
182     SimpleVal, // A simple offsetted value that is accessed.
183     LoadVal,   // A value produced by a load.
184     MemIntrin, // A memory intrinsic which is loaded from.
185     UndefVal   // A UndefValue representing a value from dead block (which
186                // is not yet physically removed from the CFG).
187   };
188 
189   /// V - The value that is live out of the block.
190   PointerIntPair<Value *, 2, ValType> Val;
191 
192   /// Offset - The byte offset in Val that is interesting for the load query.
193   unsigned Offset = 0;
194 
195   static AvailableValue get(Value *V, unsigned Offset = 0) {
196     AvailableValue Res;
197     Res.Val.setPointer(V);
198     Res.Val.setInt(SimpleVal);
199     Res.Offset = Offset;
200     return Res;
201   }
202 
203   static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
204     AvailableValue Res;
205     Res.Val.setPointer(MI);
206     Res.Val.setInt(MemIntrin);
207     Res.Offset = Offset;
208     return Res;
209   }
210 
211   static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
212     AvailableValue Res;
213     Res.Val.setPointer(LI);
214     Res.Val.setInt(LoadVal);
215     Res.Offset = Offset;
216     return Res;
217   }
218 
219   static AvailableValue getUndef() {
220     AvailableValue Res;
221     Res.Val.setPointer(nullptr);
222     Res.Val.setInt(UndefVal);
223     Res.Offset = 0;
224     return Res;
225   }
226 
227   bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
228   bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
229   bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
230   bool isUndefValue() const { return Val.getInt() == UndefVal; }
231 
232   Value *getSimpleValue() const {
233     assert(isSimpleValue() && "Wrong accessor");
234     return Val.getPointer();
235   }
236 
237   LoadInst *getCoercedLoadValue() const {
238     assert(isCoercedLoadValue() && "Wrong accessor");
239     return cast<LoadInst>(Val.getPointer());
240   }
241 
242   MemIntrinsic *getMemIntrinValue() const {
243     assert(isMemIntrinValue() && "Wrong accessor");
244     return cast<MemIntrinsic>(Val.getPointer());
245   }
246 
247   /// Emit code at the specified insertion point to adjust the value defined
248   /// here to the specified type. This handles various coercion cases.
249   Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
250                                   GVN &gvn) const;
251 };
252 
253 /// Represents an AvailableValue which can be rematerialized at the end of
254 /// the associated BasicBlock.
255 struct llvm::gvn::AvailableValueInBlock {
256   /// BB - The basic block in question.
257   BasicBlock *BB = nullptr;
258 
259   /// AV - The actual available value
260   AvailableValue AV;
261 
262   static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
263     AvailableValueInBlock Res;
264     Res.BB = BB;
265     Res.AV = std::move(AV);
266     return Res;
267   }
268 
269   static AvailableValueInBlock get(BasicBlock *BB, Value *V,
270                                    unsigned Offset = 0) {
271     return get(BB, AvailableValue::get(V, Offset));
272   }
273 
274   static AvailableValueInBlock getUndef(BasicBlock *BB) {
275     return get(BB, AvailableValue::getUndef());
276   }
277 
278   /// Emit code at the end of this block to adjust the value defined here to
279   /// the specified type. This handles various coercion cases.
280   Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
281     return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
282   }
283 };
284 
285 //===----------------------------------------------------------------------===//
286 //                     ValueTable Internal Functions
287 //===----------------------------------------------------------------------===//
288 
289 GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
290   Expression e;
291   e.type = I->getType();
292   e.opcode = I->getOpcode();
293   if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
294     // gc.relocate is 'special' call: its second and third operands are
295     // not real values, but indices into statepoint's argument list.
296     // Use the refered to values for purposes of identity.
297     e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
298     e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
299     e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
300   } else {
301     for (Use &Op : I->operands())
302       e.varargs.push_back(lookupOrAdd(Op));
303   }
304   if (I->isCommutative()) {
305     // Ensure that commutative instructions that only differ by a permutation
306     // of their operands get the same value number by sorting the operand value
307     // numbers.  Since commutative operands are the 1st two operands it is more
308     // efficient to sort by hand rather than using, say, std::sort.
309     assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
310     if (e.varargs[0] > e.varargs[1])
311       std::swap(e.varargs[0], e.varargs[1]);
312     e.commutative = true;
313   }
314 
315   if (auto *C = dyn_cast<CmpInst>(I)) {
316     // Sort the operand value numbers so x<y and y>x get the same value number.
317     CmpInst::Predicate Predicate = C->getPredicate();
318     if (e.varargs[0] > e.varargs[1]) {
319       std::swap(e.varargs[0], e.varargs[1]);
320       Predicate = CmpInst::getSwappedPredicate(Predicate);
321     }
322     e.opcode = (C->getOpcode() << 8) | Predicate;
323     e.commutative = true;
324   } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
325     e.varargs.append(E->idx_begin(), E->idx_end());
326   } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
327     ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
328     e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
329   }
330 
331   return e;
332 }
333 
334 GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
335                                                CmpInst::Predicate Predicate,
336                                                Value *LHS, Value *RHS) {
337   assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
338          "Not a comparison!");
339   Expression e;
340   e.type = CmpInst::makeCmpResultType(LHS->getType());
341   e.varargs.push_back(lookupOrAdd(LHS));
342   e.varargs.push_back(lookupOrAdd(RHS));
343 
344   // Sort the operand value numbers so x<y and y>x get the same value number.
345   if (e.varargs[0] > e.varargs[1]) {
346     std::swap(e.varargs[0], e.varargs[1]);
347     Predicate = CmpInst::getSwappedPredicate(Predicate);
348   }
349   e.opcode = (Opcode << 8) | Predicate;
350   e.commutative = true;
351   return e;
352 }
353 
354 GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
355   assert(EI && "Not an ExtractValueInst?");
356   Expression e;
357   e.type = EI->getType();
358   e.opcode = 0;
359 
360   WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
361   if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
362     // EI is an extract from one of our with.overflow intrinsics. Synthesize
363     // a semantically equivalent expression instead of an extract value
364     // expression.
365     e.opcode = WO->getBinaryOp();
366     e.varargs.push_back(lookupOrAdd(WO->getLHS()));
367     e.varargs.push_back(lookupOrAdd(WO->getRHS()));
368     return e;
369   }
370 
371   // Not a recognised intrinsic. Fall back to producing an extract value
372   // expression.
373   e.opcode = EI->getOpcode();
374   for (Use &Op : EI->operands())
375     e.varargs.push_back(lookupOrAdd(Op));
376 
377   append_range(e.varargs, EI->indices());
378 
379   return e;
380 }
381 
382 //===----------------------------------------------------------------------===//
383 //                     ValueTable External Functions
384 //===----------------------------------------------------------------------===//
385 
386 GVN::ValueTable::ValueTable() = default;
387 GVN::ValueTable::ValueTable(const ValueTable &) = default;
388 GVN::ValueTable::ValueTable(ValueTable &&) = default;
389 GVN::ValueTable::~ValueTable() = default;
390 GVN::ValueTable &GVN::ValueTable::operator=(const GVN::ValueTable &Arg) = default;
391 
392 /// add - Insert a value into the table with a specified value number.
393 void GVN::ValueTable::add(Value *V, uint32_t num) {
394   valueNumbering.insert(std::make_pair(V, num));
395   if (PHINode *PN = dyn_cast<PHINode>(V))
396     NumberingPhi[num] = PN;
397 }
398 
399 uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
400   if (AA->doesNotAccessMemory(C)) {
401     Expression exp = createExpr(C);
402     uint32_t e = assignExpNewValueNum(exp).first;
403     valueNumbering[C] = e;
404     return e;
405   } else if (MD && AA->onlyReadsMemory(C)) {
406     Expression exp = createExpr(C);
407     auto ValNum = assignExpNewValueNum(exp);
408     if (ValNum.second) {
409       valueNumbering[C] = ValNum.first;
410       return ValNum.first;
411     }
412 
413     MemDepResult local_dep = MD->getDependency(C);
414 
415     if (!local_dep.isDef() && !local_dep.isNonLocal()) {
416       valueNumbering[C] =  nextValueNumber;
417       return nextValueNumber++;
418     }
419 
420     if (local_dep.isDef()) {
421       // For masked load/store intrinsics, the local_dep may actully be
422       // a normal load or store instruction.
423       CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
424 
425       if (!local_cdep ||
426           local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
427         valueNumbering[C] = nextValueNumber;
428         return nextValueNumber++;
429       }
430 
431       for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
432         uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
433         uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
434         if (c_vn != cd_vn) {
435           valueNumbering[C] = nextValueNumber;
436           return nextValueNumber++;
437         }
438       }
439 
440       uint32_t v = lookupOrAdd(local_cdep);
441       valueNumbering[C] = v;
442       return v;
443     }
444 
445     // Non-local case.
446     const MemoryDependenceResults::NonLocalDepInfo &deps =
447         MD->getNonLocalCallDependency(C);
448     // FIXME: Move the checking logic to MemDep!
449     CallInst* cdep = nullptr;
450 
451     // Check to see if we have a single dominating call instruction that is
452     // identical to C.
453     for (unsigned i = 0, e = deps.size(); i != e; ++i) {
454       const NonLocalDepEntry *I = &deps[i];
455       if (I->getResult().isNonLocal())
456         continue;
457 
458       // We don't handle non-definitions.  If we already have a call, reject
459       // instruction dependencies.
460       if (!I->getResult().isDef() || cdep != nullptr) {
461         cdep = nullptr;
462         break;
463       }
464 
465       CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
466       // FIXME: All duplicated with non-local case.
467       if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
468         cdep = NonLocalDepCall;
469         continue;
470       }
471 
472       cdep = nullptr;
473       break;
474     }
475 
476     if (!cdep) {
477       valueNumbering[C] = nextValueNumber;
478       return nextValueNumber++;
479     }
480 
481     if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
482       valueNumbering[C] = nextValueNumber;
483       return nextValueNumber++;
484     }
485     for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
486       uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
487       uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
488       if (c_vn != cd_vn) {
489         valueNumbering[C] = nextValueNumber;
490         return nextValueNumber++;
491       }
492     }
493 
494     uint32_t v = lookupOrAdd(cdep);
495     valueNumbering[C] = v;
496     return v;
497   } else {
498     valueNumbering[C] = nextValueNumber;
499     return nextValueNumber++;
500   }
501 }
502 
503 /// Returns true if a value number exists for the specified value.
504 bool GVN::ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; }
505 
506 /// lookup_or_add - Returns the value number for the specified value, assigning
507 /// it a new number if it did not have one before.
508 uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
509   DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
510   if (VI != valueNumbering.end())
511     return VI->second;
512 
513   if (!isa<Instruction>(V)) {
514     valueNumbering[V] = nextValueNumber;
515     return nextValueNumber++;
516   }
517 
518   Instruction* I = cast<Instruction>(V);
519   Expression exp;
520   switch (I->getOpcode()) {
521     case Instruction::Call:
522       return lookupOrAddCall(cast<CallInst>(I));
523     case Instruction::FNeg:
524     case Instruction::Add:
525     case Instruction::FAdd:
526     case Instruction::Sub:
527     case Instruction::FSub:
528     case Instruction::Mul:
529     case Instruction::FMul:
530     case Instruction::UDiv:
531     case Instruction::SDiv:
532     case Instruction::FDiv:
533     case Instruction::URem:
534     case Instruction::SRem:
535     case Instruction::FRem:
536     case Instruction::Shl:
537     case Instruction::LShr:
538     case Instruction::AShr:
539     case Instruction::And:
540     case Instruction::Or:
541     case Instruction::Xor:
542     case Instruction::ICmp:
543     case Instruction::FCmp:
544     case Instruction::Trunc:
545     case Instruction::ZExt:
546     case Instruction::SExt:
547     case Instruction::FPToUI:
548     case Instruction::FPToSI:
549     case Instruction::UIToFP:
550     case Instruction::SIToFP:
551     case Instruction::FPTrunc:
552     case Instruction::FPExt:
553     case Instruction::PtrToInt:
554     case Instruction::IntToPtr:
555     case Instruction::AddrSpaceCast:
556     case Instruction::BitCast:
557     case Instruction::Select:
558     case Instruction::Freeze:
559     case Instruction::ExtractElement:
560     case Instruction::InsertElement:
561     case Instruction::ShuffleVector:
562     case Instruction::InsertValue:
563     case Instruction::GetElementPtr:
564       exp = createExpr(I);
565       break;
566     case Instruction::ExtractValue:
567       exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
568       break;
569     case Instruction::PHI:
570       valueNumbering[V] = nextValueNumber;
571       NumberingPhi[nextValueNumber] = cast<PHINode>(V);
572       return nextValueNumber++;
573     default:
574       valueNumbering[V] = nextValueNumber;
575       return nextValueNumber++;
576   }
577 
578   uint32_t e = assignExpNewValueNum(exp).first;
579   valueNumbering[V] = e;
580   return e;
581 }
582 
583 /// Returns the value number of the specified value. Fails if
584 /// the value has not yet been numbered.
585 uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
586   DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
587   if (Verify) {
588     assert(VI != valueNumbering.end() && "Value not numbered?");
589     return VI->second;
590   }
591   return (VI != valueNumbering.end()) ? VI->second : 0;
592 }
593 
594 /// Returns the value number of the given comparison,
595 /// assigning it a new number if it did not have one before.  Useful when
596 /// we deduced the result of a comparison, but don't immediately have an
597 /// instruction realizing that comparison to hand.
598 uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
599                                          CmpInst::Predicate Predicate,
600                                          Value *LHS, Value *RHS) {
601   Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
602   return assignExpNewValueNum(exp).first;
603 }
604 
605 /// Remove all entries from the ValueTable.
606 void GVN::ValueTable::clear() {
607   valueNumbering.clear();
608   expressionNumbering.clear();
609   NumberingPhi.clear();
610   PhiTranslateTable.clear();
611   nextValueNumber = 1;
612   Expressions.clear();
613   ExprIdx.clear();
614   nextExprNumber = 0;
615 }
616 
617 /// Remove a value from the value numbering.
618 void GVN::ValueTable::erase(Value *V) {
619   uint32_t Num = valueNumbering.lookup(V);
620   valueNumbering.erase(V);
621   // If V is PHINode, V <--> value number is an one-to-one mapping.
622   if (isa<PHINode>(V))
623     NumberingPhi.erase(Num);
624 }
625 
626 /// verifyRemoved - Verify that the value is removed from all internal data
627 /// structures.
628 void GVN::ValueTable::verifyRemoved(const Value *V) const {
629   for (DenseMap<Value*, uint32_t>::const_iterator
630          I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
631     assert(I->first != V && "Inst still occurs in value numbering map!");
632   }
633 }
634 
635 //===----------------------------------------------------------------------===//
636 //                                GVN Pass
637 //===----------------------------------------------------------------------===//
638 
639 bool GVN::isPREEnabled() const {
640   return Options.AllowPRE.getValueOr(GVNEnablePRE);
641 }
642 
643 bool GVN::isLoadPREEnabled() const {
644   return Options.AllowLoadPRE.getValueOr(GVNEnableLoadPRE);
645 }
646 
647 bool GVN::isLoadInLoopPREEnabled() const {
648   return Options.AllowLoadInLoopPRE.getValueOr(GVNEnableLoadInLoopPRE);
649 }
650 
651 bool GVN::isLoadPRESplitBackedgeEnabled() const {
652   return Options.AllowLoadPRESplitBackedge.getValueOr(
653       GVNEnableSplitBackedgeInLoadPRE);
654 }
655 
656 bool GVN::isMemDepEnabled() const {
657   return Options.AllowMemDep.getValueOr(GVNEnableMemDep);
658 }
659 
660 PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
661   // FIXME: The order of evaluation of these 'getResult' calls is very
662   // significant! Re-ordering these variables will cause GVN when run alone to
663   // be less effective! We should fix memdep and basic-aa to not exhibit this
664   // behavior, but until then don't change the order here.
665   auto &AC = AM.getResult<AssumptionAnalysis>(F);
666   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
667   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
668   auto &AA = AM.getResult<AAManager>(F);
669   auto *MemDep =
670       isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
671   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
672   auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
673   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
674   bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
675                          MSSA ? &MSSA->getMSSA() : nullptr);
676   if (!Changed)
677     return PreservedAnalyses::all();
678   PreservedAnalyses PA;
679   PA.preserve<DominatorTreeAnalysis>();
680   PA.preserve<GlobalsAA>();
681   PA.preserve<TargetLibraryAnalysis>();
682   if (MSSA)
683     PA.preserve<MemorySSAAnalysis>();
684   if (LI)
685     PA.preserve<LoopAnalysis>();
686   return PA;
687 }
688 
689 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
690 LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value*>& d) const {
691   errs() << "{\n";
692   for (auto &I : d) {
693     errs() << I.first << "\n";
694     I.second->dump();
695   }
696   errs() << "}\n";
697 }
698 #endif
699 
700 enum class AvailabilityState : char {
701   /// We know the block *is not* fully available. This is a fixpoint.
702   Unavailable = 0,
703   /// We know the block *is* fully available. This is a fixpoint.
704   Available = 1,
705   /// We do not know whether the block is fully available or not,
706   /// but we are currently speculating that it will be.
707   /// If it would have turned out that the block was, in fact, not fully
708   /// available, this would have been cleaned up into an Unavailable.
709   SpeculativelyAvailable = 2,
710 };
711 
712 /// Return true if we can prove that the value
713 /// we're analyzing is fully available in the specified block.  As we go, keep
714 /// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
715 /// map is actually a tri-state map with the following values:
716 ///   0) we know the block *is not* fully available.
717 ///   1) we know the block *is* fully available.
718 ///   2) we do not know whether the block is fully available or not, but we are
719 ///      currently speculating that it will be.
720 static bool IsValueFullyAvailableInBlock(
721     BasicBlock *BB,
722     DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
723   SmallVector<BasicBlock *, 32> Worklist;
724   Optional<BasicBlock *> UnavailableBB;
725 
726   // The number of times we didn't find an entry for a block in a map and
727   // optimistically inserted an entry marking block as speculatively available.
728   unsigned NumNewNewSpeculativelyAvailableBBs = 0;
729 
730 #ifndef NDEBUG
731   SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
732   SmallVector<BasicBlock *, 32> AvailableBBs;
733 #endif
734 
735   Worklist.emplace_back(BB);
736   while (!Worklist.empty()) {
737     BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
738     // Optimistically assume that the block is Speculatively Available and check
739     // to see if we already know about this block in one lookup.
740     std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
741         FullyAvailableBlocks.try_emplace(
742             CurrBB, AvailabilityState::SpeculativelyAvailable);
743     AvailabilityState &State = IV.first->second;
744 
745     // Did the entry already exist for this block?
746     if (!IV.second) {
747       if (State == AvailabilityState::Unavailable) {
748         UnavailableBB = CurrBB;
749         break; // Backpropagate unavailability info.
750       }
751 
752 #ifndef NDEBUG
753       AvailableBBs.emplace_back(CurrBB);
754 #endif
755       continue; // Don't recurse further, but continue processing worklist.
756     }
757 
758     // No entry found for block.
759     ++NumNewNewSpeculativelyAvailableBBs;
760     bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;
761 
762     // If we have exhausted our budget, mark this block as unavailable.
763     // Also, if this block has no predecessors, the value isn't live-in here.
764     if (OutOfBudget || pred_empty(CurrBB)) {
765       MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
766       State = AvailabilityState::Unavailable;
767       UnavailableBB = CurrBB;
768       break; // Backpropagate unavailability info.
769     }
770 
771     // Tentatively consider this block as speculatively available.
772 #ifndef NDEBUG
773     NewSpeculativelyAvailableBBs.insert(CurrBB);
774 #endif
775     // And further recurse into block's predecessors, in depth-first order!
776     Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
777   }
778 
779 #if LLVM_ENABLE_STATS
780   IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
781       NumNewNewSpeculativelyAvailableBBs);
782 #endif
783 
784   // If the block isn't marked as fixpoint yet
785   // (the Unavailable and Available states are fixpoints)
786   auto MarkAsFixpointAndEnqueueSuccessors =
787       [&](BasicBlock *BB, AvailabilityState FixpointState) {
788         auto It = FullyAvailableBlocks.find(BB);
789         if (It == FullyAvailableBlocks.end())
790           return; // Never queried this block, leave as-is.
791         switch (AvailabilityState &State = It->second) {
792         case AvailabilityState::Unavailable:
793         case AvailabilityState::Available:
794           return; // Don't backpropagate further, continue processing worklist.
795         case AvailabilityState::SpeculativelyAvailable: // Fix it!
796           State = FixpointState;
797 #ifndef NDEBUG
798           assert(NewSpeculativelyAvailableBBs.erase(BB) &&
799                  "Found a speculatively available successor leftover?");
800 #endif
801           // Queue successors for further processing.
802           Worklist.append(succ_begin(BB), succ_end(BB));
803           return;
804         }
805       };
806 
807   if (UnavailableBB) {
808     // Okay, we have encountered an unavailable block.
809     // Mark speculatively available blocks reachable from UnavailableBB as
810     // unavailable as well. Paths are terminated when they reach blocks not in
811     // FullyAvailableBlocks or they are not marked as speculatively available.
812     Worklist.clear();
813     Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
814     while (!Worklist.empty())
815       MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
816                                          AvailabilityState::Unavailable);
817   }
818 
819 #ifndef NDEBUG
820   Worklist.clear();
821   for (BasicBlock *AvailableBB : AvailableBBs)
822     Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
823   while (!Worklist.empty())
824     MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
825                                        AvailabilityState::Available);
826 
827   assert(NewSpeculativelyAvailableBBs.empty() &&
828          "Must have fixed all the new speculatively available blocks.");
829 #endif
830 
831   return !UnavailableBB;
832 }
833 
834 /// Given a set of loads specified by ValuesPerBlock,
835 /// construct SSA form, allowing us to eliminate LI.  This returns the value
836 /// that should be used at LI's definition site.
837 static Value *ConstructSSAForLoadSet(LoadInst *LI,
838                          SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
839                                      GVN &gvn) {
840   // Check for the fully redundant, dominating load case.  In this case, we can
841   // just use the dominating value directly.
842   if (ValuesPerBlock.size() == 1 &&
843       gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
844                                                LI->getParent())) {
845     assert(!ValuesPerBlock[0].AV.isUndefValue() &&
846            "Dead BB dominate this block");
847     return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
848   }
849 
850   // Otherwise, we have to construct SSA form.
851   SmallVector<PHINode*, 8> NewPHIs;
852   SSAUpdater SSAUpdate(&NewPHIs);
853   SSAUpdate.Initialize(LI->getType(), LI->getName());
854 
855   for (const AvailableValueInBlock &AV : ValuesPerBlock) {
856     BasicBlock *BB = AV.BB;
857 
858     if (AV.AV.isUndefValue())
859       continue;
860 
861     if (SSAUpdate.HasValueForBlock(BB))
862       continue;
863 
864     // If the value is the load that we will be eliminating, and the block it's
865     // available in is the block that the load is in, then don't add it as
866     // SSAUpdater will resolve the value to the relevant phi which may let it
867     // avoid phi construction entirely if there's actually only one value.
868     if (BB == LI->getParent() &&
869         ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == LI) ||
870          (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == LI)))
871       continue;
872 
873     SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
874   }
875 
876   // Perform PHI construction.
877   return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
878 }
879 
880 Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
881                                                 Instruction *InsertPt,
882                                                 GVN &gvn) const {
883   Value *Res;
884   Type *LoadTy = LI->getType();
885   const DataLayout &DL = LI->getModule()->getDataLayout();
886   if (isSimpleValue()) {
887     Res = getSimpleValue();
888     if (Res->getType() != LoadTy) {
889       Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
890 
891       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
892                         << "  " << *getSimpleValue() << '\n'
893                         << *Res << '\n'
894                         << "\n\n\n");
895     }
896   } else if (isCoercedLoadValue()) {
897     LoadInst *Load = getCoercedLoadValue();
898     if (Load->getType() == LoadTy && Offset == 0) {
899       Res = Load;
900     } else {
901       Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
902       // We would like to use gvn.markInstructionForDeletion here, but we can't
903       // because the load is already memoized into the leader map table that GVN
904       // tracks.  It is potentially possible to remove the load from the table,
905       // but then there all of the operations based on it would need to be
906       // rehashed.  Just leave the dead load around.
907       gvn.getMemDep().removeInstruction(Load);
908       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
909                         << "  " << *getCoercedLoadValue() << '\n'
910                         << *Res << '\n'
911                         << "\n\n\n");
912     }
913   } else if (isMemIntrinValue()) {
914     Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
915                                  InsertPt, DL);
916     LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
917                       << "  " << *getMemIntrinValue() << '\n'
918                       << *Res << '\n'
919                       << "\n\n\n");
920   } else {
921     llvm_unreachable("Should not materialize value from dead block");
922   }
923   assert(Res && "failed to materialize?");
924   return Res;
925 }
926 
927 static bool isLifetimeStart(const Instruction *Inst) {
928   if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
929     return II->getIntrinsicID() == Intrinsic::lifetime_start;
930   return false;
931 }
932 
933 /// Try to locate the three instruction involved in a missed
934 /// load-elimination case that is due to an intervening store.
935 static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
936                                    DominatorTree *DT,
937                                    OptimizationRemarkEmitter *ORE) {
938   using namespace ore;
939 
940   User *OtherAccess = nullptr;
941 
942   OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", LI);
943   R << "load of type " << NV("Type", LI->getType()) << " not eliminated"
944     << setExtraArgs();
945 
946   for (auto *U : LI->getPointerOperand()->users())
947     if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
948         DT->dominates(cast<Instruction>(U), LI)) {
949       // FIXME: for now give up if there are multiple memory accesses that
950       // dominate the load.  We need further analysis to decide which one is
951       // that we're forwarding from.
952       if (OtherAccess)
953         OtherAccess = nullptr;
954       else
955         OtherAccess = U;
956     }
957 
958   if (OtherAccess)
959     R << " in favor of " << NV("OtherAccess", OtherAccess);
960 
961   R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
962 
963   ORE->emit(R);
964 }
965 
966 bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
967                                   Value *Address, AvailableValue &Res) {
968   assert((DepInfo.isDef() || DepInfo.isClobber()) &&
969          "expected a local dependence");
970   assert(LI->isUnordered() && "rules below are incorrect for ordered access");
971 
972   const DataLayout &DL = LI->getModule()->getDataLayout();
973 
974   Instruction *DepInst = DepInfo.getInst();
975   if (DepInfo.isClobber()) {
976     // If the dependence is to a store that writes to a superset of the bits
977     // read by the load, we can extract the bits we need for the load from the
978     // stored value.
979     if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
980       // Can't forward from non-atomic to atomic without violating memory model.
981       if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
982         int Offset =
983           analyzeLoadFromClobberingStore(LI->getType(), Address, DepSI, DL);
984         if (Offset != -1) {
985           Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
986           return true;
987         }
988       }
989     }
990 
991     // Check to see if we have something like this:
992     //    load i32* P
993     //    load i8* (P+1)
994     // if we have this, replace the later with an extraction from the former.
995     if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
996       // If this is a clobber and L is the first instruction in its block, then
997       // we have the first instruction in the entry block.
998       // Can't forward from non-atomic to atomic without violating memory model.
999       if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
1000         int Offset =
1001           analyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);
1002 
1003         if (Offset != -1) {
1004           Res = AvailableValue::getLoad(DepLI, Offset);
1005           return true;
1006         }
1007       }
1008     }
1009 
1010     // If the clobbering value is a memset/memcpy/memmove, see if we can
1011     // forward a value on from it.
1012     if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1013       if (Address && !LI->isAtomic()) {
1014         int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
1015                                                       DepMI, DL);
1016         if (Offset != -1) {
1017           Res = AvailableValue::getMI(DepMI, Offset);
1018           return true;
1019         }
1020       }
1021     }
1022     // Nothing known about this clobber, have to be conservative
1023     LLVM_DEBUG(
1024         // fast print dep, using operator<< on instruction is too slow.
1025         dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
1026         dbgs() << " is clobbered by " << *DepInst << '\n';);
1027     if (ORE->allowExtraAnalysis(DEBUG_TYPE))
1028       reportMayClobberedLoad(LI, DepInfo, DT, ORE);
1029 
1030     return false;
1031   }
1032   assert(DepInfo.isDef() && "follows from above");
1033 
1034   // Loading the allocation -> undef.
1035   if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
1036       isAlignedAllocLikeFn(DepInst, TLI) ||
1037       // Loading immediately after lifetime begin -> undef.
1038       isLifetimeStart(DepInst)) {
1039     Res = AvailableValue::get(UndefValue::get(LI->getType()));
1040     return true;
1041   }
1042 
1043   // Loading from calloc (which zero initializes memory) -> zero
1044   if (isCallocLikeFn(DepInst, TLI)) {
1045     Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
1046     return true;
1047   }
1048 
1049   if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1050     // Reject loads and stores that are to the same address but are of
1051     // different types if we have to. If the stored value is convertable to
1052     // the loaded value, we can reuse it.
1053     if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(),
1054                                          DL))
1055       return false;
1056 
1057     // Can't forward from non-atomic to atomic without violating memory model.
1058     if (S->isAtomic() < LI->isAtomic())
1059       return false;
1060 
1061     Res = AvailableValue::get(S->getValueOperand());
1062     return true;
1063   }
1064 
1065   if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1066     // If the types mismatch and we can't handle it, reject reuse of the load.
1067     // If the stored value is larger or equal to the loaded value, we can reuse
1068     // it.
1069     if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
1070       return false;
1071 
1072     // Can't forward from non-atomic to atomic without violating memory model.
1073     if (LD->isAtomic() < LI->isAtomic())
1074       return false;
1075 
1076     Res = AvailableValue::getLoad(LD);
1077     return true;
1078   }
1079 
1080   // Unknown def - must be conservative
1081   LLVM_DEBUG(
1082       // fast print dep, using operator<< on instruction is too slow.
1083       dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
1084       dbgs() << " has unknown def " << *DepInst << '\n';);
1085   return false;
1086 }
1087 
1088 void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
1089                                   AvailValInBlkVect &ValuesPerBlock,
1090                                   UnavailBlkVect &UnavailableBlocks) {
1091   // Filter out useless results (non-locals, etc).  Keep track of the blocks
1092   // where we have a value available in repl, also keep track of whether we see
1093   // dependencies that produce an unknown value for the load (such as a call
1094   // that could potentially clobber the load).
1095   unsigned NumDeps = Deps.size();
1096   for (unsigned i = 0, e = NumDeps; i != e; ++i) {
1097     BasicBlock *DepBB = Deps[i].getBB();
1098     MemDepResult DepInfo = Deps[i].getResult();
1099 
1100     if (DeadBlocks.count(DepBB)) {
1101       // Dead dependent mem-op disguise as a load evaluating the same value
1102       // as the load in question.
1103       ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
1104       continue;
1105     }
1106 
1107     if (!DepInfo.isDef() && !DepInfo.isClobber()) {
1108       UnavailableBlocks.push_back(DepBB);
1109       continue;
1110     }
1111 
1112     // The address being loaded in this non-local block may not be the same as
1113     // the pointer operand of the load if PHI translation occurs.  Make sure
1114     // to consider the right address.
1115     Value *Address = Deps[i].getAddress();
1116 
1117     AvailableValue AV;
1118     if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
1119       // subtlety: because we know this was a non-local dependency, we know
1120       // it's safe to materialize anywhere between the instruction within
1121       // DepInfo and the end of it's block.
1122       ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1123                                                           std::move(AV)));
1124     } else {
1125       UnavailableBlocks.push_back(DepBB);
1126     }
1127   }
1128 
1129   assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1130          "post condition violation");
1131 }
1132 
1133 bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
1134                          UnavailBlkVect &UnavailableBlocks) {
1135   // Okay, we have *some* definitions of the value.  This means that the value
1136   // is available in some of our (transitive) predecessors.  Lets think about
1137   // doing PRE of this load.  This will involve inserting a new load into the
1138   // predecessor when it's not available.  We could do this in general, but
1139   // prefer to not increase code size.  As such, we only do this when we know
1140   // that we only have to insert *one* load (which means we're basically moving
1141   // the load, not inserting a new one).
1142 
1143   SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1144                                         UnavailableBlocks.end());
1145 
1146   // Let's find the first basic block with more than one predecessor.  Walk
1147   // backwards through predecessors if needed.
1148   BasicBlock *LoadBB = LI->getParent();
1149   BasicBlock *TmpBB = LoadBB;
1150 
1151   // Check that there is no implicit control flow instructions above our load in
1152   // its block. If there is an instruction that doesn't always pass the
1153   // execution to the following instruction, then moving through it may become
1154   // invalid. For example:
1155   //
1156   // int arr[LEN];
1157   // int index = ???;
1158   // ...
1159   // guard(0 <= index && index < LEN);
1160   // use(arr[index]);
1161   //
1162   // It is illegal to move the array access to any point above the guard,
1163   // because if the index is out of bounds we should deoptimize rather than
1164   // access the array.
1165   // Check that there is no guard in this block above our instruction.
1166   bool MustEnsureSafetyOfSpeculativeExecution =
1167       ICF->isDominatedByICFIFromSameBlock(LI);
1168 
1169   while (TmpBB->getSinglePredecessor()) {
1170     TmpBB = TmpBB->getSinglePredecessor();
1171     if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1172       return false;
1173     if (Blockers.count(TmpBB))
1174       return false;
1175 
1176     // If any of these blocks has more than one successor (i.e. if the edge we
1177     // just traversed was critical), then there are other paths through this
1178     // block along which the load may not be anticipated.  Hoisting the load
1179     // above this block would be adding the load to execution paths along
1180     // which it was not previously executed.
1181     if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1182       return false;
1183 
1184     // Check that there is no implicit control flow in a block above.
1185     MustEnsureSafetyOfSpeculativeExecution =
1186         MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1187   }
1188 
1189   assert(TmpBB);
1190   LoadBB = TmpBB;
1191 
1192   // Check to see how many predecessors have the loaded value fully
1193   // available.
1194   MapVector<BasicBlock *, Value *> PredLoads;
1195   DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1196   for (const AvailableValueInBlock &AV : ValuesPerBlock)
1197     FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1198   for (BasicBlock *UnavailableBB : UnavailableBlocks)
1199     FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1200 
1201   SmallVector<BasicBlock *, 4> CriticalEdgePred;
1202   for (BasicBlock *Pred : predecessors(LoadBB)) {
1203     // If any predecessor block is an EH pad that does not allow non-PHI
1204     // instructions before the terminator, we can't PRE the load.
1205     if (Pred->getTerminator()->isEHPad()) {
1206       LLVM_DEBUG(
1207           dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1208                  << Pred->getName() << "': " << *LI << '\n');
1209       return false;
1210     }
1211 
1212     if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1213       continue;
1214     }
1215 
1216     if (Pred->getTerminator()->getNumSuccessors() != 1) {
1217       if (isa<IndirectBrInst>(Pred->getTerminator())) {
1218         LLVM_DEBUG(
1219             dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1220                    << Pred->getName() << "': " << *LI << '\n');
1221         return false;
1222       }
1223 
1224       // FIXME: Can we support the fallthrough edge?
1225       if (isa<CallBrInst>(Pred->getTerminator())) {
1226         LLVM_DEBUG(
1227             dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
1228                    << Pred->getName() << "': " << *LI << '\n');
1229         return false;
1230       }
1231 
1232       if (LoadBB->isEHPad()) {
1233         LLVM_DEBUG(
1234             dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1235                    << Pred->getName() << "': " << *LI << '\n');
1236         return false;
1237       }
1238 
1239       // Do not split backedge as it will break the canonical loop form.
1240       if (!isLoadPRESplitBackedgeEnabled())
1241         if (DT->dominates(LoadBB, Pred)) {
1242           LLVM_DEBUG(
1243               dbgs()
1244               << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1245               << Pred->getName() << "': " << *LI << '\n');
1246           return false;
1247         }
1248 
1249       CriticalEdgePred.push_back(Pred);
1250     } else {
1251       // Only add the predecessors that will not be split for now.
1252       PredLoads[Pred] = nullptr;
1253     }
1254   }
1255 
1256   // Decide whether PRE is profitable for this load.
1257   unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
1258   assert(NumUnavailablePreds != 0 &&
1259          "Fully available value should already be eliminated!");
1260 
1261   // If this load is unavailable in multiple predecessors, reject it.
1262   // FIXME: If we could restructure the CFG, we could make a common pred with
1263   // all the preds that don't have an available LI and insert a new load into
1264   // that one block.
1265   if (NumUnavailablePreds != 1)
1266       return false;
1267 
1268   // Now we know where we will insert load. We must ensure that it is safe
1269   // to speculatively execute the load at that points.
1270   if (MustEnsureSafetyOfSpeculativeExecution) {
1271     if (CriticalEdgePred.size())
1272       if (!isSafeToSpeculativelyExecute(LI, LoadBB->getFirstNonPHI(), DT))
1273         return false;
1274     for (auto &PL : PredLoads)
1275       if (!isSafeToSpeculativelyExecute(LI, PL.first->getTerminator(), DT))
1276         return false;
1277   }
1278 
1279   // Split critical edges, and update the unavailable predecessors accordingly.
1280   for (BasicBlock *OrigPred : CriticalEdgePred) {
1281     BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1282     assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1283     PredLoads[NewPred] = nullptr;
1284     LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1285                       << LoadBB->getName() << '\n');
1286   }
1287 
1288   // Check if the load can safely be moved to all the unavailable predecessors.
1289   bool CanDoPRE = true;
1290   const DataLayout &DL = LI->getModule()->getDataLayout();
1291   SmallVector<Instruction*, 8> NewInsts;
1292   for (auto &PredLoad : PredLoads) {
1293     BasicBlock *UnavailablePred = PredLoad.first;
1294 
1295     // Do PHI translation to get its value in the predecessor if necessary.  The
1296     // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1297     // We do the translation for each edge we skipped by going from LI's block
1298     // to LoadBB, otherwise we might miss pieces needing translation.
1299 
1300     // If all preds have a single successor, then we know it is safe to insert
1301     // the load on the pred (?!?), so we can insert code to materialize the
1302     // pointer if it is not available.
1303     Value *LoadPtr = LI->getPointerOperand();
1304     BasicBlock *Cur = LI->getParent();
1305     while (Cur != LoadBB) {
1306       PHITransAddr Address(LoadPtr, DL, AC);
1307       LoadPtr = Address.PHITranslateWithInsertion(
1308           Cur, Cur->getSinglePredecessor(), *DT, NewInsts);
1309       if (!LoadPtr) {
1310         CanDoPRE = false;
1311         break;
1312       }
1313       Cur = Cur->getSinglePredecessor();
1314     }
1315 
1316     if (LoadPtr) {
1317       PHITransAddr Address(LoadPtr, DL, AC);
1318       LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT,
1319                                                   NewInsts);
1320     }
1321     // If we couldn't find or insert a computation of this phi translated value,
1322     // we fail PRE.
1323     if (!LoadPtr) {
1324       LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1325                         << *LI->getPointerOperand() << "\n");
1326       CanDoPRE = false;
1327       break;
1328     }
1329 
1330     PredLoad.second = LoadPtr;
1331   }
1332 
1333   if (!CanDoPRE) {
1334     while (!NewInsts.empty()) {
1335       // Erase instructions generated by the failed PHI translation before
1336       // trying to number them. PHI translation might insert instructions
1337       // in basic blocks other than the current one, and we delete them
1338       // directly, as markInstructionForDeletion only allows removing from the
1339       // current basic block.
1340       NewInsts.pop_back_val()->eraseFromParent();
1341     }
1342     // HINT: Don't revert the edge-splitting as following transformation may
1343     // also need to split these critical edges.
1344     return !CriticalEdgePred.empty();
1345   }
1346 
1347   // Okay, we can eliminate this load by inserting a reload in the predecessor
1348   // and using PHI construction to get the value in the other predecessors, do
1349   // it.
1350   LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
1351   LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
1352              << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
1353              << '\n');
1354 
1355   // Assign value numbers to the new instructions.
1356   for (Instruction *I : NewInsts) {
1357     // Instructions that have been inserted in predecessor(s) to materialize
1358     // the load address do not retain their original debug locations. Doing
1359     // so could lead to confusing (but correct) source attributions.
1360     I->updateLocationAfterHoist();
1361 
1362     // FIXME: We really _ought_ to insert these value numbers into their
1363     // parent's availability map.  However, in doing so, we risk getting into
1364     // ordering issues.  If a block hasn't been processed yet, we would be
1365     // marking a value as AVAIL-IN, which isn't what we intend.
1366     VN.lookupOrAdd(I);
1367   }
1368 
1369   for (const auto &PredLoad : PredLoads) {
1370     BasicBlock *UnavailablePred = PredLoad.first;
1371     Value *LoadPtr = PredLoad.second;
1372 
1373     auto *NewLoad = new LoadInst(
1374         LI->getType(), LoadPtr, LI->getName() + ".pre", LI->isVolatile(),
1375         LI->getAlign(), LI->getOrdering(), LI->getSyncScopeID(),
1376         UnavailablePred->getTerminator());
1377     NewLoad->setDebugLoc(LI->getDebugLoc());
1378     if (MSSAU) {
1379       auto *MSSA = MSSAU->getMemorySSA();
1380       // Get the defining access of the original load or use the load if it is a
1381       // MemoryDef (e.g. because it is volatile). The inserted loads are
1382       // guaranteed to load from the same definition.
1383       auto *LIAcc = MSSA->getMemoryAccess(LI);
1384       auto *DefiningAcc =
1385           isa<MemoryDef>(LIAcc) ? LIAcc : LIAcc->getDefiningAccess();
1386       auto *NewAccess = MSSAU->createMemoryAccessInBB(
1387           NewLoad, DefiningAcc, NewLoad->getParent(),
1388           MemorySSA::BeforeTerminator);
1389       if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1390         MSSAU->insertDef(NewDef, /*RenameUses=*/true);
1391       else
1392         MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
1393     }
1394 
1395     // Transfer the old load's AA tags to the new load.
1396     AAMDNodes Tags;
1397     LI->getAAMetadata(Tags);
1398     if (Tags)
1399       NewLoad->setAAMetadata(Tags);
1400 
1401     if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
1402       NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1403     if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
1404       NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1405     if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range))
1406       NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1407 
1408     // We do not propagate the old load's debug location, because the new
1409     // load now lives in a different BB, and we want to avoid a jumpy line
1410     // table.
1411     // FIXME: How do we retain source locations without causing poor debugging
1412     // behavior?
1413 
1414     // Add the newly created load.
1415     ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
1416                                                         NewLoad));
1417     MD->invalidateCachedPointerInfo(LoadPtr);
1418     LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1419   }
1420 
1421   // Perform PHI construction.
1422   Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
1423   LI->replaceAllUsesWith(V);
1424   if (isa<PHINode>(V))
1425     V->takeName(LI);
1426   if (Instruction *I = dyn_cast<Instruction>(V))
1427     I->setDebugLoc(LI->getDebugLoc());
1428   if (V->getType()->isPtrOrPtrVectorTy())
1429     MD->invalidateCachedPointerInfo(V);
1430   markInstructionForDeletion(LI);
1431   ORE->emit([&]() {
1432     return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
1433            << "load eliminated by PRE";
1434   });
1435   ++NumPRELoad;
1436   return true;
1437 }
1438 
1439 static void reportLoadElim(LoadInst *LI, Value *AvailableValue,
1440                            OptimizationRemarkEmitter *ORE) {
1441   using namespace ore;
1442 
1443   ORE->emit([&]() {
1444     return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
1445            << "load of type " << NV("Type", LI->getType()) << " eliminated"
1446            << setExtraArgs() << " in favor of "
1447            << NV("InfavorOfValue", AvailableValue);
1448   });
1449 }
1450 
1451 /// Attempt to eliminate a load whose dependencies are
1452 /// non-local by performing PHI construction.
1453 bool GVN::processNonLocalLoad(LoadInst *LI) {
1454   // non-local speculations are not allowed under asan.
1455   if (LI->getParent()->getParent()->hasFnAttribute(
1456           Attribute::SanitizeAddress) ||
1457       LI->getParent()->getParent()->hasFnAttribute(
1458           Attribute::SanitizeHWAddress))
1459     return false;
1460 
1461   // Step 1: Find the non-local dependencies of the load.
1462   LoadDepVect Deps;
1463   MD->getNonLocalPointerDependency(LI, Deps);
1464 
1465   // If we had to process more than one hundred blocks to find the
1466   // dependencies, this load isn't worth worrying about.  Optimizing
1467   // it will be too expensive.
1468   unsigned NumDeps = Deps.size();
1469   if (NumDeps > MaxNumDeps)
1470     return false;
1471 
1472   // If we had a phi translation failure, we'll have a single entry which is a
1473   // clobber in the current block.  Reject this early.
1474   if (NumDeps == 1 &&
1475       !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1476     LLVM_DEBUG(dbgs() << "GVN: non-local load "; LI->printAsOperand(dbgs());
1477                dbgs() << " has unknown dependencies\n";);
1478     return false;
1479   }
1480 
1481   bool Changed = false;
1482   // If this load follows a GEP, see if we can PRE the indices before analyzing.
1483   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
1484     for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
1485                                         OE = GEP->idx_end();
1486          OI != OE; ++OI)
1487       if (Instruction *I = dyn_cast<Instruction>(OI->get()))
1488         Changed |= performScalarPRE(I);
1489   }
1490 
1491   // Step 2: Analyze the availability of the load
1492   AvailValInBlkVect ValuesPerBlock;
1493   UnavailBlkVect UnavailableBlocks;
1494   AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);
1495 
1496   // If we have no predecessors that produce a known value for this load, exit
1497   // early.
1498   if (ValuesPerBlock.empty())
1499     return Changed;
1500 
1501   // Step 3: Eliminate fully redundancy.
1502   //
1503   // If all of the instructions we depend on produce a known value for this
1504   // load, then it is fully redundant and we can use PHI insertion to compute
1505   // its value.  Insert PHIs and remove the fully redundant value now.
1506   if (UnavailableBlocks.empty()) {
1507     LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
1508 
1509     // Perform PHI construction.
1510     Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
1511     LI->replaceAllUsesWith(V);
1512 
1513     if (isa<PHINode>(V))
1514       V->takeName(LI);
1515     if (Instruction *I = dyn_cast<Instruction>(V))
1516       // If instruction I has debug info, then we should not update it.
1517       // Also, if I has a null DebugLoc, then it is still potentially incorrect
1518       // to propagate LI's DebugLoc because LI may not post-dominate I.
1519       if (LI->getDebugLoc() && LI->getParent() == I->getParent())
1520         I->setDebugLoc(LI->getDebugLoc());
1521     if (V->getType()->isPtrOrPtrVectorTy())
1522       MD->invalidateCachedPointerInfo(V);
1523     markInstructionForDeletion(LI);
1524     ++NumGVNLoad;
1525     reportLoadElim(LI, V, ORE);
1526     return true;
1527   }
1528 
1529   // Step 4: Eliminate partial redundancy.
1530   if (!isPREEnabled() || !isLoadPREEnabled())
1531     return Changed;
1532   if (!isLoadInLoopPREEnabled() && this->LI &&
1533       this->LI->getLoopFor(LI->getParent()))
1534     return Changed;
1535 
1536   return Changed || PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
1537 }
1538 
1539 static bool impliesEquivalanceIfTrue(CmpInst* Cmp) {
1540   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
1541     return true;
1542 
1543   // Floating point comparisons can be equal, but not equivalent.  Cases:
1544   // NaNs for unordered operators
1545   // +0.0 vs 0.0 for all operators
1546   if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1547       (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1548        Cmp->getFastMathFlags().noNaNs())) {
1549       Value *LHS = Cmp->getOperand(0);
1550       Value *RHS = Cmp->getOperand(1);
1551       // If we can prove either side non-zero, then equality must imply
1552       // equivalence.
1553       // FIXME: We should do this optimization if 'no signed zeros' is
1554       // applicable via an instruction-level fast-math-flag or some other
1555       // indicator that relaxed FP semantics are being used.
1556       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1557         return true;
1558       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1559         return true;;
1560       // TODO: Handle vector floating point constants
1561   }
1562   return false;
1563 }
1564 
1565 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) {
1566   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1567     return true;
1568 
1569   // Floating point comparisons can be equal, but not equivelent.  Cases:
1570   // NaNs for unordered operators
1571   // +0.0 vs 0.0 for all operators
1572   if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
1573        Cmp->getFastMathFlags().noNaNs()) ||
1574       Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
1575       Value *LHS = Cmp->getOperand(0);
1576       Value *RHS = Cmp->getOperand(1);
1577       // If we can prove either side non-zero, then equality must imply
1578       // equivalence.
1579       // FIXME: We should do this optimization if 'no signed zeros' is
1580       // applicable via an instruction-level fast-math-flag or some other
1581       // indicator that relaxed FP semantics are being used.
1582       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1583         return true;
1584       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1585         return true;;
1586       // TODO: Handle vector floating point constants
1587   }
1588   return false;
1589 }
1590 
1591 
1592 static bool hasUsersIn(Value *V, BasicBlock *BB) {
1593   for (User *U : V->users())
1594     if (isa<Instruction>(U) &&
1595         cast<Instruction>(U)->getParent() == BB)
1596       return true;
1597   return false;
1598 }
1599 
1600 bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
1601   assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
1602          "This function can only be called with llvm.assume intrinsic");
1603   Value *V = IntrinsicI->getArgOperand(0);
1604 
1605   if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1606     if (Cond->isZero()) {
1607       Type *Int8Ty = Type::getInt8Ty(V->getContext());
1608       // Insert a new store to null instruction before the load to indicate that
1609       // this code is not reachable.  FIXME: We could insert unreachable
1610       // instruction directly because we can modify the CFG.
1611       auto *NewS = new StoreInst(UndefValue::get(Int8Ty),
1612                                  Constant::getNullValue(Int8Ty->getPointerTo()),
1613                                  IntrinsicI);
1614       if (MSSAU) {
1615         const MemoryUseOrDef *FirstNonDom = nullptr;
1616         const auto *AL =
1617             MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
1618 
1619         // If there are accesses in the current basic block, find the first one
1620         // that does not come before NewS. The new memory access is inserted
1621         // after the found access or before the terminator if no such access is
1622         // found.
1623         if (AL) {
1624           for (auto &Acc : *AL) {
1625             if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
1626               if (!Current->getMemoryInst()->comesBefore(NewS)) {
1627                 FirstNonDom = Current;
1628                 break;
1629               }
1630           }
1631         }
1632 
1633         // This added store is to null, so it will never executed and we can
1634         // just use the LiveOnEntry def as defining access.
1635         auto *NewDef =
1636             FirstNonDom ? MSSAU->createMemoryAccessBefore(
1637                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1638                               const_cast<MemoryUseOrDef *>(FirstNonDom))
1639                         : MSSAU->createMemoryAccessInBB(
1640                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1641                               NewS->getParent(), MemorySSA::BeforeTerminator);
1642 
1643         MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
1644       }
1645     }
1646     if (isAssumeWithEmptyBundle(*IntrinsicI))
1647       markInstructionForDeletion(IntrinsicI);
1648     return false;
1649   } else if (isa<Constant>(V)) {
1650     // If it's not false, and constant, it must evaluate to true. This means our
1651     // assume is assume(true), and thus, pointless, and we don't want to do
1652     // anything more here.
1653     return false;
1654   }
1655 
1656   Constant *True = ConstantInt::getTrue(V->getContext());
1657   bool Changed = false;
1658 
1659   for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
1660     BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
1661 
1662     // This property is only true in dominated successors, propagateEquality
1663     // will check dominance for us.
1664     Changed |= propagateEquality(V, True, Edge, false);
1665   }
1666 
1667   // We can replace assume value with true, which covers cases like this:
1668   // call void @llvm.assume(i1 %cmp)
1669   // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
1670   ReplaceOperandsWithMap[V] = True;
1671 
1672   // Similarly, after assume(!NotV) we know that NotV == false.
1673   Value *NotV;
1674   if (match(V, m_Not(m_Value(NotV))))
1675     ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
1676 
1677   // If we find an equality fact, canonicalize all dominated uses in this block
1678   // to one of the two values.  We heuristically choice the "oldest" of the
1679   // two where age is determined by value number. (Note that propagateEquality
1680   // above handles the cross block case.)
1681   //
1682   // Key case to cover are:
1683   // 1)
1684   // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
1685   // call void @llvm.assume(i1 %cmp)
1686   // ret float %0 ; will change it to ret float 3.000000e+00
1687   // 2)
1688   // %load = load float, float* %addr
1689   // %cmp = fcmp oeq float %load, %0
1690   // call void @llvm.assume(i1 %cmp)
1691   // ret float %load ; will change it to ret float %0
1692   if (auto *CmpI = dyn_cast<CmpInst>(V)) {
1693     if (impliesEquivalanceIfTrue(CmpI)) {
1694       Value *CmpLHS = CmpI->getOperand(0);
1695       Value *CmpRHS = CmpI->getOperand(1);
1696       // Heuristically pick the better replacement -- the choice of heuristic
1697       // isn't terribly important here, but the fact we canonicalize on some
1698       // replacement is for exposing other simplifications.
1699       // TODO: pull this out as a helper function and reuse w/existing
1700       // (slightly different) logic.
1701       if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
1702         std::swap(CmpLHS, CmpRHS);
1703       if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
1704         std::swap(CmpLHS, CmpRHS);
1705       if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
1706           (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
1707         // Move the 'oldest' value to the right-hand side, using the value
1708         // number as a proxy for age.
1709         uint32_t LVN = VN.lookupOrAdd(CmpLHS);
1710         uint32_t RVN = VN.lookupOrAdd(CmpRHS);
1711         if (LVN < RVN)
1712           std::swap(CmpLHS, CmpRHS);
1713       }
1714 
1715       // Handle degenerate case where we either haven't pruned a dead path or a
1716       // removed a trivial assume yet.
1717       if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
1718         return Changed;
1719 
1720       LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
1721                  << *CmpLHS << " with "
1722                  << *CmpRHS << " in block "
1723                  << IntrinsicI->getParent()->getName() << "\n");
1724 
1725 
1726       // Setup the replacement map - this handles uses within the same block
1727       if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
1728         ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
1729 
1730       // NOTE: The non-block local cases are handled by the call to
1731       // propagateEquality above; this block is just about handling the block
1732       // local cases.  TODO: There's a bunch of logic in propagateEqualiy which
1733       // isn't duplicated for the block local case, can we share it somehow?
1734     }
1735   }
1736   return Changed;
1737 }
1738 
1739 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
1740   patchReplacementInstruction(I, Repl);
1741   I->replaceAllUsesWith(Repl);
1742 }
1743 
1744 /// Attempt to eliminate a load, first by eliminating it
1745 /// locally, and then attempting non-local elimination if that fails.
1746 bool GVN::processLoad(LoadInst *L) {
1747   if (!MD)
1748     return false;
1749 
1750   // This code hasn't been audited for ordered or volatile memory access
1751   if (!L->isUnordered())
1752     return false;
1753 
1754   if (L->use_empty()) {
1755     markInstructionForDeletion(L);
1756     return true;
1757   }
1758 
1759   // ... to a pointer that has been loaded from before...
1760   MemDepResult Dep = MD->getDependency(L);
1761 
1762   // If it is defined in another block, try harder.
1763   if (Dep.isNonLocal())
1764     return processNonLocalLoad(L);
1765 
1766   // Only handle the local case below
1767   if (!Dep.isDef() && !Dep.isClobber()) {
1768     // This might be a NonFuncLocal or an Unknown
1769     LLVM_DEBUG(
1770         // fast print dep, using operator<< on instruction is too slow.
1771         dbgs() << "GVN: load "; L->printAsOperand(dbgs());
1772         dbgs() << " has unknown dependence\n";);
1773     return false;
1774   }
1775 
1776   AvailableValue AV;
1777   if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
1778     Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);
1779 
1780     // Replace the load!
1781     patchAndReplaceAllUsesWith(L, AvailableValue);
1782     markInstructionForDeletion(L);
1783     if (MSSAU)
1784       MSSAU->removeMemoryAccess(L);
1785     ++NumGVNLoad;
1786     reportLoadElim(L, AvailableValue, ORE);
1787     // Tell MDA to reexamine the reused pointer since we might have more
1788     // information after forwarding it.
1789     if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
1790       MD->invalidateCachedPointerInfo(AvailableValue);
1791     return true;
1792   }
1793 
1794   return false;
1795 }
1796 
1797 /// Return a pair the first field showing the value number of \p Exp and the
1798 /// second field showing whether it is a value number newly created.
1799 std::pair<uint32_t, bool>
1800 GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
1801   uint32_t &e = expressionNumbering[Exp];
1802   bool CreateNewValNum = !e;
1803   if (CreateNewValNum) {
1804     Expressions.push_back(Exp);
1805     if (ExprIdx.size() < nextValueNumber + 1)
1806       ExprIdx.resize(nextValueNumber * 2);
1807     e = nextValueNumber;
1808     ExprIdx[nextValueNumber++] = nextExprNumber++;
1809   }
1810   return {e, CreateNewValNum};
1811 }
1812 
1813 /// Return whether all the values related with the same \p num are
1814 /// defined in \p BB.
1815 bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
1816                                      GVN &Gvn) {
1817   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
1818   while (Vals && Vals->BB == BB)
1819     Vals = Vals->Next;
1820   return !Vals;
1821 }
1822 
1823 /// Wrap phiTranslateImpl to provide caching functionality.
1824 uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
1825                                        const BasicBlock *PhiBlock, uint32_t Num,
1826                                        GVN &Gvn) {
1827   auto FindRes = PhiTranslateTable.find({Num, Pred});
1828   if (FindRes != PhiTranslateTable.end())
1829     return FindRes->second;
1830   uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
1831   PhiTranslateTable.insert({{Num, Pred}, NewNum});
1832   return NewNum;
1833 }
1834 
1835 // Return true if the value number \p Num and NewNum have equal value.
1836 // Return false if the result is unknown.
1837 bool GVN::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
1838                                        const BasicBlock *Pred,
1839                                        const BasicBlock *PhiBlock, GVN &Gvn) {
1840   CallInst *Call = nullptr;
1841   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
1842   while (Vals) {
1843     Call = dyn_cast<CallInst>(Vals->Val);
1844     if (Call && Call->getParent() == PhiBlock)
1845       break;
1846     Vals = Vals->Next;
1847   }
1848 
1849   if (AA->doesNotAccessMemory(Call))
1850     return true;
1851 
1852   if (!MD || !AA->onlyReadsMemory(Call))
1853     return false;
1854 
1855   MemDepResult local_dep = MD->getDependency(Call);
1856   if (!local_dep.isNonLocal())
1857     return false;
1858 
1859   const MemoryDependenceResults::NonLocalDepInfo &deps =
1860       MD->getNonLocalCallDependency(Call);
1861 
1862   // Check to see if the Call has no function local clobber.
1863   for (const NonLocalDepEntry &D : deps) {
1864     if (D.getResult().isNonFuncLocal())
1865       return true;
1866   }
1867   return false;
1868 }
1869 
1870 /// Translate value number \p Num using phis, so that it has the values of
1871 /// the phis in BB.
1872 uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
1873                                            const BasicBlock *PhiBlock,
1874                                            uint32_t Num, GVN &Gvn) {
1875   if (PHINode *PN = NumberingPhi[Num]) {
1876     for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
1877       if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
1878         if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
1879           return TransVal;
1880     }
1881     return Num;
1882   }
1883 
1884   // If there is any value related with Num is defined in a BB other than
1885   // PhiBlock, it cannot depend on a phi in PhiBlock without going through
1886   // a backedge. We can do an early exit in that case to save compile time.
1887   if (!areAllValsInBB(Num, PhiBlock, Gvn))
1888     return Num;
1889 
1890   if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
1891     return Num;
1892   Expression Exp = Expressions[ExprIdx[Num]];
1893 
1894   for (unsigned i = 0; i < Exp.varargs.size(); i++) {
1895     // For InsertValue and ExtractValue, some varargs are index numbers
1896     // instead of value numbers. Those index numbers should not be
1897     // translated.
1898     if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
1899         (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
1900         (i > 1 && Exp.opcode == Instruction::ShuffleVector))
1901       continue;
1902     Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
1903   }
1904 
1905   if (Exp.commutative) {
1906     assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
1907     if (Exp.varargs[0] > Exp.varargs[1]) {
1908       std::swap(Exp.varargs[0], Exp.varargs[1]);
1909       uint32_t Opcode = Exp.opcode >> 8;
1910       if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
1911         Exp.opcode = (Opcode << 8) |
1912                      CmpInst::getSwappedPredicate(
1913                          static_cast<CmpInst::Predicate>(Exp.opcode & 255));
1914     }
1915   }
1916 
1917   if (uint32_t NewNum = expressionNumbering[Exp]) {
1918     if (Exp.opcode == Instruction::Call && NewNum != Num)
1919       return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
1920     return NewNum;
1921   }
1922   return Num;
1923 }
1924 
1925 /// Erase stale entry from phiTranslate cache so phiTranslate can be computed
1926 /// again.
1927 void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
1928                                                const BasicBlock &CurrBlock) {
1929   for (const BasicBlock *Pred : predecessors(&CurrBlock))
1930     PhiTranslateTable.erase({Num, Pred});
1931 }
1932 
1933 // In order to find a leader for a given value number at a
1934 // specific basic block, we first obtain the list of all Values for that number,
1935 // and then scan the list to find one whose block dominates the block in
1936 // question.  This is fast because dominator tree queries consist of only
1937 // a few comparisons of DFS numbers.
1938 Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
1939   LeaderTableEntry Vals = LeaderTable[num];
1940   if (!Vals.Val) return nullptr;
1941 
1942   Value *Val = nullptr;
1943   if (DT->dominates(Vals.BB, BB)) {
1944     Val = Vals.Val;
1945     if (isa<Constant>(Val)) return Val;
1946   }
1947 
1948   LeaderTableEntry* Next = Vals.Next;
1949   while (Next) {
1950     if (DT->dominates(Next->BB, BB)) {
1951       if (isa<Constant>(Next->Val)) return Next->Val;
1952       if (!Val) Val = Next->Val;
1953     }
1954 
1955     Next = Next->Next;
1956   }
1957 
1958   return Val;
1959 }
1960 
1961 /// There is an edge from 'Src' to 'Dst'.  Return
1962 /// true if every path from the entry block to 'Dst' passes via this edge.  In
1963 /// particular 'Dst' must not be reachable via another edge from 'Src'.
1964 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
1965                                        DominatorTree *DT) {
1966   // While in theory it is interesting to consider the case in which Dst has
1967   // more than one predecessor, because Dst might be part of a loop which is
1968   // only reachable from Src, in practice it is pointless since at the time
1969   // GVN runs all such loops have preheaders, which means that Dst will have
1970   // been changed to have only one predecessor, namely Src.
1971   const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
1972   assert((!Pred || Pred == E.getStart()) &&
1973          "No edge between these basic blocks!");
1974   return Pred != nullptr;
1975 }
1976 
1977 void GVN::assignBlockRPONumber(Function &F) {
1978   BlockRPONumber.clear();
1979   uint32_t NextBlockNumber = 1;
1980   ReversePostOrderTraversal<Function *> RPOT(&F);
1981   for (BasicBlock *BB : RPOT)
1982     BlockRPONumber[BB] = NextBlockNumber++;
1983   InvalidBlockRPONumbers = false;
1984 }
1985 
1986 bool GVN::replaceOperandsForInBlockEquality(Instruction *Instr) const {
1987   bool Changed = false;
1988   for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
1989     Value *Operand = Instr->getOperand(OpNum);
1990     auto it = ReplaceOperandsWithMap.find(Operand);
1991     if (it != ReplaceOperandsWithMap.end()) {
1992       LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
1993                         << *it->second << " in instruction " << *Instr << '\n');
1994       Instr->setOperand(OpNum, it->second);
1995       Changed = true;
1996     }
1997   }
1998   return Changed;
1999 }
2000 
2001 /// The given values are known to be equal in every block
2002 /// dominated by 'Root'.  Exploit this, for example by replacing 'LHS' with
2003 /// 'RHS' everywhere in the scope.  Returns whether a change was made.
2004 /// If DominatesByEdge is false, then it means that we will propagate the RHS
2005 /// value starting from the end of Root.Start.
2006 bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
2007                             bool DominatesByEdge) {
2008   SmallVector<std::pair<Value*, Value*>, 4> Worklist;
2009   Worklist.push_back(std::make_pair(LHS, RHS));
2010   bool Changed = false;
2011   // For speed, compute a conservative fast approximation to
2012   // DT->dominates(Root, Root.getEnd());
2013   const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2014 
2015   while (!Worklist.empty()) {
2016     std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2017     LHS = Item.first; RHS = Item.second;
2018 
2019     if (LHS == RHS)
2020       continue;
2021     assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2022 
2023     // Don't try to propagate equalities between constants.
2024     if (isa<Constant>(LHS) && isa<Constant>(RHS))
2025       continue;
2026 
2027     // Prefer a constant on the right-hand side, or an Argument if no constants.
2028     if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2029       std::swap(LHS, RHS);
2030     assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2031 
2032     // If there is no obvious reason to prefer the left-hand side over the
2033     // right-hand side, ensure the longest lived term is on the right-hand side,
2034     // so the shortest lived term will be replaced by the longest lived.
2035     // This tends to expose more simplifications.
2036     uint32_t LVN = VN.lookupOrAdd(LHS);
2037     if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2038         (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2039       // Move the 'oldest' value to the right-hand side, using the value number
2040       // as a proxy for age.
2041       uint32_t RVN = VN.lookupOrAdd(RHS);
2042       if (LVN < RVN) {
2043         std::swap(LHS, RHS);
2044         LVN = RVN;
2045       }
2046     }
2047 
2048     // If value numbering later sees that an instruction in the scope is equal
2049     // to 'LHS' then ensure it will be turned into 'RHS'.  In order to preserve
2050     // the invariant that instructions only occur in the leader table for their
2051     // own value number (this is used by removeFromLeaderTable), do not do this
2052     // if RHS is an instruction (if an instruction in the scope is morphed into
2053     // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2054     // using the leader table is about compiling faster, not optimizing better).
2055     // The leader table only tracks basic blocks, not edges. Only add to if we
2056     // have the simple case where the edge dominates the end.
2057     if (RootDominatesEnd && !isa<Instruction>(RHS))
2058       addToLeaderTable(LVN, RHS, Root.getEnd());
2059 
2060     // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.  As
2061     // LHS always has at least one use that is not dominated by Root, this will
2062     // never do anything if LHS has only one use.
2063     if (!LHS->hasOneUse()) {
2064       unsigned NumReplacements =
2065           DominatesByEdge
2066               ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
2067               : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2068 
2069       Changed |= NumReplacements > 0;
2070       NumGVNEqProp += NumReplacements;
2071       // Cached information for anything that uses LHS will be invalid.
2072       if (MD)
2073         MD->invalidateCachedPointerInfo(LHS);
2074     }
2075 
2076     // Now try to deduce additional equalities from this one. For example, if
2077     // the known equality was "(A != B)" == "false" then it follows that A and B
2078     // are equal in the scope. Only boolean equalities with an explicit true or
2079     // false RHS are currently supported.
2080     if (!RHS->getType()->isIntegerTy(1))
2081       // Not a boolean equality - bail out.
2082       continue;
2083     ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2084     if (!CI)
2085       // RHS neither 'true' nor 'false' - bail out.
2086       continue;
2087     // Whether RHS equals 'true'.  Otherwise it equals 'false'.
2088     bool isKnownTrue = CI->isMinusOne();
2089     bool isKnownFalse = !isKnownTrue;
2090 
2091     // If "A && B" is known true then both A and B are known true.  If "A || B"
2092     // is known false then both A and B are known false.
2093     Value *A, *B;
2094     if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2095         (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2096       Worklist.push_back(std::make_pair(A, RHS));
2097       Worklist.push_back(std::make_pair(B, RHS));
2098       continue;
2099     }
2100 
2101     // If we are propagating an equality like "(A == B)" == "true" then also
2102     // propagate the equality A == B.  When propagating a comparison such as
2103     // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2104     if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2105       Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2106 
2107       // If "A == B" is known true, or "A != B" is known false, then replace
2108       // A with B everywhere in the scope.  For floating point operations, we
2109       // have to be careful since equality does not always imply equivalance.
2110       if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
2111           (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
2112         Worklist.push_back(std::make_pair(Op0, Op1));
2113 
2114       // If "A >= B" is known true, replace "A < B" with false everywhere.
2115       CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2116       Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2117       // Since we don't have the instruction "A < B" immediately to hand, work
2118       // out the value number that it would have and use that to find an
2119       // appropriate instruction (if any).
2120       uint32_t NextNum = VN.getNextUnusedValueNumber();
2121       uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2122       // If the number we were assigned was brand new then there is no point in
2123       // looking for an instruction realizing it: there cannot be one!
2124       if (Num < NextNum) {
2125         Value *NotCmp = findLeader(Root.getEnd(), Num);
2126         if (NotCmp && isa<Instruction>(NotCmp)) {
2127           unsigned NumReplacements =
2128               DominatesByEdge
2129                   ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2130                   : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2131                                              Root.getStart());
2132           Changed |= NumReplacements > 0;
2133           NumGVNEqProp += NumReplacements;
2134           // Cached information for anything that uses NotCmp will be invalid.
2135           if (MD)
2136             MD->invalidateCachedPointerInfo(NotCmp);
2137         }
2138       }
2139       // Ensure that any instruction in scope that gets the "A < B" value number
2140       // is replaced with false.
2141       // The leader table only tracks basic blocks, not edges. Only add to if we
2142       // have the simple case where the edge dominates the end.
2143       if (RootDominatesEnd)
2144         addToLeaderTable(Num, NotVal, Root.getEnd());
2145 
2146       continue;
2147     }
2148   }
2149 
2150   return Changed;
2151 }
2152 
2153 /// When calculating availability, handle an instruction
2154 /// by inserting it into the appropriate sets
2155 bool GVN::processInstruction(Instruction *I) {
2156   // Ignore dbg info intrinsics.
2157   if (isa<DbgInfoIntrinsic>(I))
2158     return false;
2159 
2160   // If the instruction can be easily simplified then do so now in preference
2161   // to value numbering it.  Value numbering often exposes redundancies, for
2162   // example if it determines that %y is equal to %x then the instruction
2163   // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2164   const DataLayout &DL = I->getModule()->getDataLayout();
2165   if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
2166     bool Changed = false;
2167     if (!I->use_empty()) {
2168       I->replaceAllUsesWith(V);
2169       Changed = true;
2170     }
2171     if (isInstructionTriviallyDead(I, TLI)) {
2172       markInstructionForDeletion(I);
2173       Changed = true;
2174     }
2175     if (Changed) {
2176       if (MD && V->getType()->isPtrOrPtrVectorTy())
2177         MD->invalidateCachedPointerInfo(V);
2178       ++NumGVNSimpl;
2179       return true;
2180     }
2181   }
2182 
2183   if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I))
2184     if (IntrinsicI->getIntrinsicID() == Intrinsic::assume)
2185       return processAssumeIntrinsic(IntrinsicI);
2186 
2187   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
2188     if (processLoad(LI))
2189       return true;
2190 
2191     unsigned Num = VN.lookupOrAdd(LI);
2192     addToLeaderTable(Num, LI, LI->getParent());
2193     return false;
2194   }
2195 
2196   // For conditional branches, we can perform simple conditional propagation on
2197   // the condition value itself.
2198   if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2199     if (!BI->isConditional())
2200       return false;
2201 
2202     if (isa<Constant>(BI->getCondition()))
2203       return processFoldableCondBr(BI);
2204 
2205     Value *BranchCond = BI->getCondition();
2206     BasicBlock *TrueSucc = BI->getSuccessor(0);
2207     BasicBlock *FalseSucc = BI->getSuccessor(1);
2208     // Avoid multiple edges early.
2209     if (TrueSucc == FalseSucc)
2210       return false;
2211 
2212     BasicBlock *Parent = BI->getParent();
2213     bool Changed = false;
2214 
2215     Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2216     BasicBlockEdge TrueE(Parent, TrueSucc);
2217     Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2218 
2219     Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2220     BasicBlockEdge FalseE(Parent, FalseSucc);
2221     Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2222 
2223     return Changed;
2224   }
2225 
2226   // For switches, propagate the case values into the case destinations.
2227   if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2228     Value *SwitchCond = SI->getCondition();
2229     BasicBlock *Parent = SI->getParent();
2230     bool Changed = false;
2231 
2232     // Remember how many outgoing edges there are to every successor.
2233     SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2234     for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
2235       ++SwitchEdges[SI->getSuccessor(i)];
2236 
2237     for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2238          i != e; ++i) {
2239       BasicBlock *Dst = i->getCaseSuccessor();
2240       // If there is only a single edge, propagate the case value into it.
2241       if (SwitchEdges.lookup(Dst) == 1) {
2242         BasicBlockEdge E(Parent, Dst);
2243         Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2244       }
2245     }
2246     return Changed;
2247   }
2248 
2249   // Instructions with void type don't return a value, so there's
2250   // no point in trying to find redundancies in them.
2251   if (I->getType()->isVoidTy())
2252     return false;
2253 
2254   uint32_t NextNum = VN.getNextUnusedValueNumber();
2255   unsigned Num = VN.lookupOrAdd(I);
2256 
2257   // Allocations are always uniquely numbered, so we can save time and memory
2258   // by fast failing them.
2259   if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2260     addToLeaderTable(Num, I, I->getParent());
2261     return false;
2262   }
2263 
2264   // If the number we were assigned was a brand new VN, then we don't
2265   // need to do a lookup to see if the number already exists
2266   // somewhere in the domtree: it can't!
2267   if (Num >= NextNum) {
2268     addToLeaderTable(Num, I, I->getParent());
2269     return false;
2270   }
2271 
2272   // Perform fast-path value-number based elimination of values inherited from
2273   // dominators.
2274   Value *Repl = findLeader(I->getParent(), Num);
2275   if (!Repl) {
2276     // Failure, just remember this instance for future use.
2277     addToLeaderTable(Num, I, I->getParent());
2278     return false;
2279   } else if (Repl == I) {
2280     // If I was the result of a shortcut PRE, it might already be in the table
2281     // and the best replacement for itself. Nothing to do.
2282     return false;
2283   }
2284 
2285   // Remove it!
2286   patchAndReplaceAllUsesWith(I, Repl);
2287   if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2288     MD->invalidateCachedPointerInfo(Repl);
2289   markInstructionForDeletion(I);
2290   return true;
2291 }
2292 
2293 /// runOnFunction - This is the main transformation entry point for a function.
2294 bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2295                   const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2296                   MemoryDependenceResults *RunMD, LoopInfo *LI,
2297                   OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2298   AC = &RunAC;
2299   DT = &RunDT;
2300   VN.setDomTree(DT);
2301   TLI = &RunTLI;
2302   VN.setAliasAnalysis(&RunAA);
2303   MD = RunMD;
2304   ImplicitControlFlowTracking ImplicitCFT;
2305   ICF = &ImplicitCFT;
2306   this->LI = LI;
2307   VN.setMemDep(MD);
2308   ORE = RunORE;
2309   InvalidBlockRPONumbers = true;
2310   MemorySSAUpdater Updater(MSSA);
2311   MSSAU = MSSA ? &Updater : nullptr;
2312 
2313   bool Changed = false;
2314   bool ShouldContinue = true;
2315 
2316   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2317   // Merge unconditional branches, allowing PRE to catch more
2318   // optimization opportunities.
2319   for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
2320     BasicBlock *BB = &*FI++;
2321 
2322     bool removedBlock = MergeBlockIntoPredecessor(BB, &DTU, LI, MSSAU, MD);
2323     if (removedBlock)
2324       ++NumGVNBlocks;
2325 
2326     Changed |= removedBlock;
2327   }
2328 
2329   unsigned Iteration = 0;
2330   while (ShouldContinue) {
2331     LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2332     ShouldContinue = iterateOnFunction(F);
2333     Changed |= ShouldContinue;
2334     ++Iteration;
2335   }
2336 
2337   if (isPREEnabled()) {
2338     // Fabricate val-num for dead-code in order to suppress assertion in
2339     // performPRE().
2340     assignValNumForDeadCode();
2341     bool PREChanged = true;
2342     while (PREChanged) {
2343       PREChanged = performPRE(F);
2344       Changed |= PREChanged;
2345     }
2346   }
2347 
2348   // FIXME: Should perform GVN again after PRE does something.  PRE can move
2349   // computations into blocks where they become fully redundant.  Note that
2350   // we can't do this until PRE's critical edge splitting updates memdep.
2351   // Actually, when this happens, we should just fully integrate PRE into GVN.
2352 
2353   cleanupGlobalSets();
2354   // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2355   // iteration.
2356   DeadBlocks.clear();
2357 
2358   if (MSSA && VerifyMemorySSA)
2359     MSSA->verifyMemorySSA();
2360 
2361   return Changed;
2362 }
2363 
2364 bool GVN::processBlock(BasicBlock *BB) {
2365   // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2366   // (and incrementing BI before processing an instruction).
2367   assert(InstrsToErase.empty() &&
2368          "We expect InstrsToErase to be empty across iterations");
2369   if (DeadBlocks.count(BB))
2370     return false;
2371 
2372   // Clearing map before every BB because it can be used only for single BB.
2373   ReplaceOperandsWithMap.clear();
2374   bool ChangedFunction = false;
2375 
2376   // Since we may not have visited the input blocks of the phis, we can't
2377   // use our normal hash approach for phis.  Instead, simply look for
2378   // obvious duplicates.  The first pass of GVN will tend to create
2379   // identical phis, and the second or later passes can eliminate them.
2380   ChangedFunction |= EliminateDuplicatePHINodes(BB);
2381 
2382   for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2383        BI != BE;) {
2384     if (!ReplaceOperandsWithMap.empty())
2385       ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2386     ChangedFunction |= processInstruction(&*BI);
2387 
2388     if (InstrsToErase.empty()) {
2389       ++BI;
2390       continue;
2391     }
2392 
2393     // If we need some instructions deleted, do it now.
2394     NumGVNInstr += InstrsToErase.size();
2395 
2396     // Avoid iterator invalidation.
2397     bool AtStart = BI == BB->begin();
2398     if (!AtStart)
2399       --BI;
2400 
2401     for (auto *I : InstrsToErase) {
2402       assert(I->getParent() == BB && "Removing instruction from wrong block?");
2403       LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2404       salvageKnowledge(I, AC);
2405       salvageDebugInfo(*I);
2406       if (MD) MD->removeInstruction(I);
2407       if (MSSAU)
2408         MSSAU->removeMemoryAccess(I);
2409       LLVM_DEBUG(verifyRemoved(I));
2410       ICF->removeInstruction(I);
2411       I->eraseFromParent();
2412     }
2413     InstrsToErase.clear();
2414 
2415     if (AtStart)
2416       BI = BB->begin();
2417     else
2418       ++BI;
2419   }
2420 
2421   return ChangedFunction;
2422 }
2423 
2424 // Instantiate an expression in a predecessor that lacked it.
2425 bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2426                                     BasicBlock *Curr, unsigned int ValNo) {
2427   // Because we are going top-down through the block, all value numbers
2428   // will be available in the predecessor by the time we need them.  Any
2429   // that weren't originally present will have been instantiated earlier
2430   // in this loop.
2431   bool success = true;
2432   for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2433     Value *Op = Instr->getOperand(i);
2434     if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2435       continue;
2436     // This could be a newly inserted instruction, in which case, we won't
2437     // find a value number, and should give up before we hurt ourselves.
2438     // FIXME: Rewrite the infrastructure to let it easier to value number
2439     // and process newly inserted instructions.
2440     if (!VN.exists(Op)) {
2441       success = false;
2442       break;
2443     }
2444     uint32_t TValNo =
2445         VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2446     if (Value *V = findLeader(Pred, TValNo)) {
2447       Instr->setOperand(i, V);
2448     } else {
2449       success = false;
2450       break;
2451     }
2452   }
2453 
2454   // Fail out if we encounter an operand that is not available in
2455   // the PRE predecessor.  This is typically because of loads which
2456   // are not value numbered precisely.
2457   if (!success)
2458     return false;
2459 
2460   Instr->insertBefore(Pred->getTerminator());
2461   Instr->setName(Instr->getName() + ".pre");
2462   Instr->setDebugLoc(Instr->getDebugLoc());
2463 
2464   unsigned Num = VN.lookupOrAdd(Instr);
2465   VN.add(Instr, Num);
2466 
2467   // Update the availability map to include the new instruction.
2468   addToLeaderTable(Num, Instr, Pred);
2469   return true;
2470 }
2471 
2472 bool GVN::performScalarPRE(Instruction *CurInst) {
2473   if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2474       isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2475       CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2476       isa<DbgInfoIntrinsic>(CurInst))
2477     return false;
2478 
2479   // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2480   // sinking the compare again, and it would force the code generator to
2481   // move the i1 from processor flags or predicate registers into a general
2482   // purpose register.
2483   if (isa<CmpInst>(CurInst))
2484     return false;
2485 
2486   // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2487   // sinking the addressing mode computation back to its uses. Extending the
2488   // GEP's live range increases the register pressure, and therefore it can
2489   // introduce unnecessary spills.
2490   //
2491   // This doesn't prevent Load PRE. PHI translation will make the GEP available
2492   // to the load by moving it to the predecessor block if necessary.
2493   if (isa<GetElementPtrInst>(CurInst))
2494     return false;
2495 
2496   if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2497     // We don't currently value number ANY inline asm calls.
2498     if (CallB->isInlineAsm())
2499       return false;
2500     // Don't do PRE on convergent calls.
2501     if (CallB->isConvergent())
2502       return false;
2503   }
2504 
2505   uint32_t ValNo = VN.lookup(CurInst);
2506 
2507   // Look for the predecessors for PRE opportunities.  We're
2508   // only trying to solve the basic diamond case, where
2509   // a value is computed in the successor and one predecessor,
2510   // but not the other.  We also explicitly disallow cases
2511   // where the successor is its own predecessor, because they're
2512   // more complicated to get right.
2513   unsigned NumWith = 0;
2514   unsigned NumWithout = 0;
2515   BasicBlock *PREPred = nullptr;
2516   BasicBlock *CurrentBlock = CurInst->getParent();
2517 
2518   // Update the RPO numbers for this function.
2519   if (InvalidBlockRPONumbers)
2520     assignBlockRPONumber(*CurrentBlock->getParent());
2521 
2522   SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2523   for (BasicBlock *P : predecessors(CurrentBlock)) {
2524     // We're not interested in PRE where blocks with predecessors that are
2525     // not reachable.
2526     if (!DT->isReachableFromEntry(P)) {
2527       NumWithout = 2;
2528       break;
2529     }
2530     // It is not safe to do PRE when P->CurrentBlock is a loop backedge, and
2531     // when CurInst has operand defined in CurrentBlock (so it may be defined
2532     // by phi in the loop header).
2533     assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2534            "Invalid BlockRPONumber map.");
2535     if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
2536         llvm::any_of(CurInst->operands(), [&](const Use &U) {
2537           if (auto *Inst = dyn_cast<Instruction>(U.get()))
2538             return Inst->getParent() == CurrentBlock;
2539           return false;
2540         })) {
2541       NumWithout = 2;
2542       break;
2543     }
2544 
2545     uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2546     Value *predV = findLeader(P, TValNo);
2547     if (!predV) {
2548       predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2549       PREPred = P;
2550       ++NumWithout;
2551     } else if (predV == CurInst) {
2552       /* CurInst dominates this predecessor. */
2553       NumWithout = 2;
2554       break;
2555     } else {
2556       predMap.push_back(std::make_pair(predV, P));
2557       ++NumWith;
2558     }
2559   }
2560 
2561   // Don't do PRE when it might increase code size, i.e. when
2562   // we would need to insert instructions in more than one pred.
2563   if (NumWithout > 1 || NumWith == 0)
2564     return false;
2565 
2566   // We may have a case where all predecessors have the instruction,
2567   // and we just need to insert a phi node. Otherwise, perform
2568   // insertion.
2569   Instruction *PREInstr = nullptr;
2570 
2571   if (NumWithout != 0) {
2572     if (!isSafeToSpeculativelyExecute(CurInst)) {
2573       // It is only valid to insert a new instruction if the current instruction
2574       // is always executed. An instruction with implicit control flow could
2575       // prevent us from doing it. If we cannot speculate the execution, then
2576       // PRE should be prohibited.
2577       if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2578         return false;
2579     }
2580 
2581     // Don't do PRE across indirect branch.
2582     if (isa<IndirectBrInst>(PREPred->getTerminator()))
2583       return false;
2584 
2585     // Don't do PRE across callbr.
2586     // FIXME: Can we do this across the fallthrough edge?
2587     if (isa<CallBrInst>(PREPred->getTerminator()))
2588       return false;
2589 
2590     // We can't do PRE safely on a critical edge, so instead we schedule
2591     // the edge to be split and perform the PRE the next time we iterate
2592     // on the function.
2593     unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2594     if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2595       toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2596       return false;
2597     }
2598     // We need to insert somewhere, so let's give it a shot
2599     PREInstr = CurInst->clone();
2600     if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2601       // If we failed insertion, make sure we remove the instruction.
2602       LLVM_DEBUG(verifyRemoved(PREInstr));
2603       PREInstr->deleteValue();
2604       return false;
2605     }
2606   }
2607 
2608   // Either we should have filled in the PRE instruction, or we should
2609   // not have needed insertions.
2610   assert(PREInstr != nullptr || NumWithout == 0);
2611 
2612   ++NumGVNPRE;
2613 
2614   // Create a PHI to make the value available in this block.
2615   PHINode *Phi =
2616       PHINode::Create(CurInst->getType(), predMap.size(),
2617                       CurInst->getName() + ".pre-phi", &CurrentBlock->front());
2618   for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
2619     if (Value *V = predMap[i].first) {
2620       // If we use an existing value in this phi, we have to patch the original
2621       // value because the phi will be used to replace a later value.
2622       patchReplacementInstruction(CurInst, V);
2623       Phi->addIncoming(V, predMap[i].second);
2624     } else
2625       Phi->addIncoming(PREInstr, PREPred);
2626   }
2627 
2628   VN.add(Phi, ValNo);
2629   // After creating a new PHI for ValNo, the phi translate result for ValNo will
2630   // be changed, so erase the related stale entries in phi translate cache.
2631   VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
2632   addToLeaderTable(ValNo, Phi, CurrentBlock);
2633   Phi->setDebugLoc(CurInst->getDebugLoc());
2634   CurInst->replaceAllUsesWith(Phi);
2635   if (MD && Phi->getType()->isPtrOrPtrVectorTy())
2636     MD->invalidateCachedPointerInfo(Phi);
2637   VN.erase(CurInst);
2638   removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2639 
2640   LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2641   if (MD)
2642     MD->removeInstruction(CurInst);
2643   if (MSSAU)
2644     MSSAU->removeMemoryAccess(CurInst);
2645   LLVM_DEBUG(verifyRemoved(CurInst));
2646   // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
2647   // some assertion failures.
2648   ICF->removeInstruction(CurInst);
2649   CurInst->eraseFromParent();
2650   ++NumGVNInstr;
2651 
2652   return true;
2653 }
2654 
2655 /// Perform a purely local form of PRE that looks for diamond
2656 /// control flow patterns and attempts to perform simple PRE at the join point.
2657 bool GVN::performPRE(Function &F) {
2658   bool Changed = false;
2659   for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
2660     // Nothing to PRE in the entry block.
2661     if (CurrentBlock == &F.getEntryBlock())
2662       continue;
2663 
2664     // Don't perform PRE on an EH pad.
2665     if (CurrentBlock->isEHPad())
2666       continue;
2667 
2668     for (BasicBlock::iterator BI = CurrentBlock->begin(),
2669                               BE = CurrentBlock->end();
2670          BI != BE;) {
2671       Instruction *CurInst = &*BI++;
2672       Changed |= performScalarPRE(CurInst);
2673     }
2674   }
2675 
2676   if (splitCriticalEdges())
2677     Changed = true;
2678 
2679   return Changed;
2680 }
2681 
2682 /// Split the critical edge connecting the given two blocks, and return
2683 /// the block inserted to the critical edge.
2684 BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
2685   // GVN does not require loop-simplify, do not try to preserve it if it is not
2686   // possible.
2687   BasicBlock *BB = SplitCriticalEdge(
2688       Pred, Succ,
2689       CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
2690   if (BB) {
2691     if (MD)
2692       MD->invalidateCachedPredecessors();
2693     InvalidBlockRPONumbers = true;
2694   }
2695   return BB;
2696 }
2697 
2698 /// Split critical edges found during the previous
2699 /// iteration that may enable further optimization.
2700 bool GVN::splitCriticalEdges() {
2701   if (toSplit.empty())
2702     return false;
2703 
2704   bool Changed = false;
2705   do {
2706     std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
2707     Changed |= SplitCriticalEdge(Edge.first, Edge.second,
2708                                  CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
2709                nullptr;
2710   } while (!toSplit.empty());
2711   if (Changed) {
2712     if (MD)
2713       MD->invalidateCachedPredecessors();
2714     InvalidBlockRPONumbers = true;
2715   }
2716   return Changed;
2717 }
2718 
2719 /// Executes one iteration of GVN
2720 bool GVN::iterateOnFunction(Function &F) {
2721   cleanupGlobalSets();
2722 
2723   // Top-down walk of the dominator tree
2724   bool Changed = false;
2725   // Needed for value numbering with phi construction to work.
2726   // RPOT walks the graph in its constructor and will not be invalidated during
2727   // processBlock.
2728   ReversePostOrderTraversal<Function *> RPOT(&F);
2729 
2730   for (BasicBlock *BB : RPOT)
2731     Changed |= processBlock(BB);
2732 
2733   return Changed;
2734 }
2735 
2736 void GVN::cleanupGlobalSets() {
2737   VN.clear();
2738   LeaderTable.clear();
2739   BlockRPONumber.clear();
2740   TableAllocator.Reset();
2741   ICF->clear();
2742   InvalidBlockRPONumbers = true;
2743 }
2744 
2745 /// Verify that the specified instruction does not occur in our
2746 /// internal data structures.
2747 void GVN::verifyRemoved(const Instruction *Inst) const {
2748   VN.verifyRemoved(Inst);
2749 
2750   // Walk through the value number scope to make sure the instruction isn't
2751   // ferreted away in it.
2752   for (const auto &I : LeaderTable) {
2753     const LeaderTableEntry *Node = &I.second;
2754     assert(Node->Val != Inst && "Inst still in value numbering scope!");
2755 
2756     while (Node->Next) {
2757       Node = Node->Next;
2758       assert(Node->Val != Inst && "Inst still in value numbering scope!");
2759     }
2760   }
2761 }
2762 
2763 /// BB is declared dead, which implied other blocks become dead as well. This
2764 /// function is to add all these blocks to "DeadBlocks". For the dead blocks'
2765 /// live successors, update their phi nodes by replacing the operands
2766 /// corresponding to dead blocks with UndefVal.
2767 void GVN::addDeadBlock(BasicBlock *BB) {
2768   SmallVector<BasicBlock *, 4> NewDead;
2769   SmallSetVector<BasicBlock *, 4> DF;
2770 
2771   NewDead.push_back(BB);
2772   while (!NewDead.empty()) {
2773     BasicBlock *D = NewDead.pop_back_val();
2774     if (DeadBlocks.count(D))
2775       continue;
2776 
2777     // All blocks dominated by D are dead.
2778     SmallVector<BasicBlock *, 8> Dom;
2779     DT->getDescendants(D, Dom);
2780     DeadBlocks.insert(Dom.begin(), Dom.end());
2781 
2782     // Figure out the dominance-frontier(D).
2783     for (BasicBlock *B : Dom) {
2784       for (BasicBlock *S : successors(B)) {
2785         if (DeadBlocks.count(S))
2786           continue;
2787 
2788         bool AllPredDead = true;
2789         for (BasicBlock *P : predecessors(S))
2790           if (!DeadBlocks.count(P)) {
2791             AllPredDead = false;
2792             break;
2793           }
2794 
2795         if (!AllPredDead) {
2796           // S could be proved dead later on. That is why we don't update phi
2797           // operands at this moment.
2798           DF.insert(S);
2799         } else {
2800           // While S is not dominated by D, it is dead by now. This could take
2801           // place if S already have a dead predecessor before D is declared
2802           // dead.
2803           NewDead.push_back(S);
2804         }
2805       }
2806     }
2807   }
2808 
2809   // For the dead blocks' live successors, update their phi nodes by replacing
2810   // the operands corresponding to dead blocks with UndefVal.
2811   for (BasicBlock *B : DF) {
2812     if (DeadBlocks.count(B))
2813       continue;
2814 
2815     // First, split the critical edges. This might also create additional blocks
2816     // to preserve LoopSimplify form and adjust edges accordingly.
2817     SmallVector<BasicBlock *, 4> Preds(predecessors(B));
2818     for (BasicBlock *P : Preds) {
2819       if (!DeadBlocks.count(P))
2820         continue;
2821 
2822       if (llvm::is_contained(successors(P), B) &&
2823           isCriticalEdge(P->getTerminator(), B)) {
2824         if (BasicBlock *S = splitCriticalEdges(P, B))
2825           DeadBlocks.insert(P = S);
2826       }
2827     }
2828 
2829     // Now undef the incoming values from the dead predecessors.
2830     for (BasicBlock *P : predecessors(B)) {
2831       if (!DeadBlocks.count(P))
2832         continue;
2833       for (PHINode &Phi : B->phis()) {
2834         Phi.setIncomingValueForBlock(P, UndefValue::get(Phi.getType()));
2835         if (MD)
2836           MD->invalidateCachedPointerInfo(&Phi);
2837       }
2838     }
2839   }
2840 }
2841 
2842 // If the given branch is recognized as a foldable branch (i.e. conditional
2843 // branch with constant condition), it will perform following analyses and
2844 // transformation.
2845 //  1) If the dead out-coming edge is a critical-edge, split it. Let
2846 //     R be the target of the dead out-coming edge.
2847 //  1) Identify the set of dead blocks implied by the branch's dead outcoming
2848 //     edge. The result of this step will be {X| X is dominated by R}
2849 //  2) Identify those blocks which haves at least one dead predecessor. The
2850 //     result of this step will be dominance-frontier(R).
2851 //  3) Update the PHIs in DF(R) by replacing the operands corresponding to
2852 //     dead blocks with "UndefVal" in an hope these PHIs will optimized away.
2853 //
2854 // Return true iff *NEW* dead code are found.
2855 bool GVN::processFoldableCondBr(BranchInst *BI) {
2856   if (!BI || BI->isUnconditional())
2857     return false;
2858 
2859   // If a branch has two identical successors, we cannot declare either dead.
2860   if (BI->getSuccessor(0) == BI->getSuccessor(1))
2861     return false;
2862 
2863   ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
2864   if (!Cond)
2865     return false;
2866 
2867   BasicBlock *DeadRoot =
2868       Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
2869   if (DeadBlocks.count(DeadRoot))
2870     return false;
2871 
2872   if (!DeadRoot->getSinglePredecessor())
2873     DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
2874 
2875   addDeadBlock(DeadRoot);
2876   return true;
2877 }
2878 
2879 // performPRE() will trigger assert if it comes across an instruction without
2880 // associated val-num. As it normally has far more live instructions than dead
2881 // instructions, it makes more sense just to "fabricate" a val-number for the
2882 // dead code than checking if instruction involved is dead or not.
2883 void GVN::assignValNumForDeadCode() {
2884   for (BasicBlock *BB : DeadBlocks) {
2885     for (Instruction &Inst : *BB) {
2886       unsigned ValNum = VN.lookupOrAdd(&Inst);
2887       addToLeaderTable(ValNum, &Inst, BB);
2888     }
2889   }
2890 }
2891 
2892 class llvm::gvn::GVNLegacyPass : public FunctionPass {
2893 public:
2894   static char ID; // Pass identification, replacement for typeid
2895 
2896   explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
2897       : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
2898     initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
2899   }
2900 
2901   bool runOnFunction(Function &F) override {
2902     if (skipFunction(F))
2903       return false;
2904 
2905     auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
2906 
2907     auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
2908     return Impl.runImpl(
2909         F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
2910         getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
2911         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
2912         getAnalysis<AAResultsWrapperPass>().getAAResults(),
2913         Impl.isMemDepEnabled()
2914             ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
2915             : nullptr,
2916         LIWP ? &LIWP->getLoopInfo() : nullptr,
2917         &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
2918         MSSAWP ? &MSSAWP->getMSSA() : nullptr);
2919   }
2920 
2921   void getAnalysisUsage(AnalysisUsage &AU) const override {
2922     AU.addRequired<AssumptionCacheTracker>();
2923     AU.addRequired<DominatorTreeWrapperPass>();
2924     AU.addRequired<TargetLibraryInfoWrapperPass>();
2925     AU.addRequired<LoopInfoWrapperPass>();
2926     if (Impl.isMemDepEnabled())
2927       AU.addRequired<MemoryDependenceWrapperPass>();
2928     AU.addRequired<AAResultsWrapperPass>();
2929     AU.addPreserved<DominatorTreeWrapperPass>();
2930     AU.addPreserved<GlobalsAAWrapperPass>();
2931     AU.addPreserved<TargetLibraryInfoWrapperPass>();
2932     AU.addPreserved<LoopInfoWrapperPass>();
2933     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2934     AU.addPreserved<MemorySSAWrapperPass>();
2935   }
2936 
2937 private:
2938   GVN Impl;
2939 };
2940 
2941 char GVNLegacyPass::ID = 0;
2942 
2943 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
2944 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2945 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2946 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2947 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2948 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2949 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2950 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
2951 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
2952 
2953 // The public interface to this file...
2954 FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
2955   return new GVNLegacyPass(NoMemDepAnalysis);
2956 }
2957