1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/MemoryBuiltins.h"
21 #include "llvm/Analysis/Loads.h"
22 #include "llvm/Analysis/LoopInfo.h"
23 #include "llvm/Analysis/VectorUtils.h"
24 #include "llvm/IR/CallSite.h"
25 #include "llvm/IR/ConstantRange.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/Dominators.h"
29 #include "llvm/IR/GetElementPtrTypeIterator.h"
30 #include "llvm/IR/GlobalAlias.h"
31 #include "llvm/IR/GlobalVariable.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/LLVMContext.h"
35 #include "llvm/IR/Metadata.h"
36 #include "llvm/IR/Operator.h"
37 #include "llvm/IR/PatternMatch.h"
38 #include "llvm/IR/Statepoint.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/MathExtras.h"
41 #include <algorithm>
42 #include <array>
43 #include <cstring>
44 using namespace llvm;
45 using namespace llvm::PatternMatch;
46 
47 const unsigned MaxDepth = 6;
48 
49 // Controls the number of uses of the value searched for possible
50 // dominating comparisons.
51 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
52                                               cl::Hidden, cl::init(20));
53 
54 /// Returns the bitwidth of the given scalar or pointer type (if unknown returns
55 /// 0). For vector types, returns the element type's bitwidth.
56 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
57   if (unsigned BitWidth = Ty->getScalarSizeInBits())
58     return BitWidth;
59 
60   return DL.getPointerTypeSizeInBits(Ty);
61 }
62 
63 namespace {
64 // Simplifying using an assume can only be done in a particular control-flow
65 // context (the context instruction provides that context). If an assume and
66 // the context instruction are not in the same block then the DT helps in
67 // figuring out if we can use it.
68 struct Query {
69   const DataLayout &DL;
70   AssumptionCache *AC;
71   const Instruction *CxtI;
72   const DominatorTree *DT;
73 
74   /// Set of assumptions that should be excluded from further queries.
75   /// This is because of the potential for mutual recursion to cause
76   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
77   /// classic case of this is assume(x = y), which will attempt to determine
78   /// bits in x from bits in y, which will attempt to determine bits in y from
79   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
80   /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
81   /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
82   /// on.
83   std::array<const Value*, MaxDepth> Excluded;
84   unsigned NumExcluded;
85 
86   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
87         const DominatorTree *DT)
88       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}
89 
90   Query(const Query &Q, const Value *NewExcl)
91       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
92     Excluded = Q.Excluded;
93     Excluded[NumExcluded++] = NewExcl;
94     assert(NumExcluded <= Excluded.size());
95   }
96 
97   bool isExcluded(const Value *Value) const {
98     if (NumExcluded == 0)
99       return false;
100     auto End = Excluded.begin() + NumExcluded;
101     return std::find(Excluded.begin(), End, Value) != End;
102   }
103 };
104 } // end anonymous namespace
105 
106 // Given the provided Value and, potentially, a context instruction, return
107 // the preferred context instruction (if any).
108 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
109   // If we've been provided with a context instruction, then use that (provided
110   // it has been inserted).
111   if (CxtI && CxtI->getParent())
112     return CxtI;
113 
114   // If the value is really an already-inserted instruction, then use that.
115   CxtI = dyn_cast<Instruction>(V);
116   if (CxtI && CxtI->getParent())
117     return CxtI;
118 
119   return nullptr;
120 }
121 
122 static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
123                              unsigned Depth, const Query &Q);
124 
125 void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
126                             const DataLayout &DL, unsigned Depth,
127                             AssumptionCache *AC, const Instruction *CxtI,
128                             const DominatorTree *DT) {
129   ::computeKnownBits(V, KnownZero, KnownOne, Depth,
130                      Query(DL, AC, safeCxtI(V, CxtI), DT));
131 }
132 
133 bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
134                                AssumptionCache *AC, const Instruction *CxtI,
135                                const DominatorTree *DT) {
136   assert(LHS->getType() == RHS->getType() &&
137          "LHS and RHS should have the same type");
138   assert(LHS->getType()->isIntOrIntVectorTy() &&
139          "LHS and RHS should be integers");
140   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
141   APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
142   APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
143   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
144   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
145   return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
146 }
147 
148 static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
149                            unsigned Depth, const Query &Q);
150 
151 void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
152                           const DataLayout &DL, unsigned Depth,
153                           AssumptionCache *AC, const Instruction *CxtI,
154                           const DominatorTree *DT) {
155   ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
156                    Query(DL, AC, safeCxtI(V, CxtI), DT));
157 }
158 
159 static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
160                                    const Query &Q);
161 
162 bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
163                                   unsigned Depth, AssumptionCache *AC,
164                                   const Instruction *CxtI,
165                                   const DominatorTree *DT) {
166   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
167                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
168 }
169 
170 static bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q);
171 
172 bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
173                           AssumptionCache *AC, const Instruction *CxtI,
174                           const DominatorTree *DT) {
175   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
176 }
177 
178 bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
179                               AssumptionCache *AC, const Instruction *CxtI,
180                               const DominatorTree *DT) {
181   bool NonNegative, Negative;
182   ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
183   return NonNegative;
184 }
185 
186 bool llvm::isKnownPositive(Value *V, const DataLayout &DL, unsigned Depth,
187                            AssumptionCache *AC, const Instruction *CxtI,
188                            const DominatorTree *DT) {
189   if (auto *CI = dyn_cast<ConstantInt>(V))
190     return CI->getValue().isStrictlyPositive();
191 
192   // TODO: We'd doing two recursive queries here.  We should factor this such
193   // that only a single query is needed.
194   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
195     isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
196 }
197 
198 static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q);
199 
200 bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
201                           AssumptionCache *AC, const Instruction *CxtI,
202                           const DominatorTree *DT) {
203   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
204                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
205                                          DT));
206 }
207 
208 static bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
209                               const Query &Q);
210 
211 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
212                              unsigned Depth, AssumptionCache *AC,
213                              const Instruction *CxtI, const DominatorTree *DT) {
214   return ::MaskedValueIsZero(V, Mask, Depth,
215                              Query(DL, AC, safeCxtI(V, CxtI), DT));
216 }
217 
218 static unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q);
219 
220 unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
221                                   unsigned Depth, AssumptionCache *AC,
222                                   const Instruction *CxtI,
223                                   const DominatorTree *DT) {
224   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
225 }
226 
227 static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
228                                    APInt &KnownZero, APInt &KnownOne,
229                                    APInt &KnownZero2, APInt &KnownOne2,
230                                    unsigned Depth, const Query &Q) {
231   if (!Add) {
232     if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
233       // We know that the top bits of C-X are clear if X contains less bits
234       // than C (i.e. no wrap-around can happen).  For example, 20-X is
235       // positive if we can prove that X is >= 0 and < 16.
236       if (!CLHS->getValue().isNegative()) {
237         unsigned BitWidth = KnownZero.getBitWidth();
238         unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
239         // NLZ can't be BitWidth with no sign bit
240         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
241         computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
242 
243         // If all of the MaskV bits are known to be zero, then we know the
244         // output top bits are zero, because we now know that the output is
245         // from [0-C].
246         if ((KnownZero2 & MaskV) == MaskV) {
247           unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
248           // Top bits known zero.
249           KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
250         }
251       }
252     }
253   }
254 
255   unsigned BitWidth = KnownZero.getBitWidth();
256 
257   // If an initial sequence of bits in the result is not needed, the
258   // corresponding bits in the operands are not needed.
259   APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
260   computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
261   computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
262 
263   // Carry in a 1 for a subtract, rather than a 0.
264   APInt CarryIn(BitWidth, 0);
265   if (!Add) {
266     // Sum = LHS + ~RHS + 1
267     std::swap(KnownZero2, KnownOne2);
268     CarryIn.setBit(0);
269   }
270 
271   APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
272   APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
273 
274   // Compute known bits of the carry.
275   APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
276   APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;
277 
278   // Compute set of known bits (where all three relevant bits are known).
279   APInt LHSKnown = LHSKnownZero | LHSKnownOne;
280   APInt RHSKnown = KnownZero2 | KnownOne2;
281   APInt CarryKnown = CarryKnownZero | CarryKnownOne;
282   APInt Known = LHSKnown & RHSKnown & CarryKnown;
283 
284   assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
285          "known bits of sum differ");
286 
287   // Compute known bits of the result.
288   KnownZero = ~PossibleSumOne & Known;
289   KnownOne = PossibleSumOne & Known;
290 
291   // Are we still trying to solve for the sign bit?
292   if (!Known.isNegative()) {
293     if (NSW) {
294       // Adding two non-negative numbers, or subtracting a negative number from
295       // a non-negative one, can't wrap into negative.
296       if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
297         KnownZero |= APInt::getSignBit(BitWidth);
298       // Adding two negative numbers, or subtracting a non-negative number from
299       // a negative one, can't wrap into non-negative.
300       else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
301         KnownOne |= APInt::getSignBit(BitWidth);
302     }
303   }
304 }
305 
306 static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
307                                 APInt &KnownZero, APInt &KnownOne,
308                                 APInt &KnownZero2, APInt &KnownOne2,
309                                 unsigned Depth, const Query &Q) {
310   unsigned BitWidth = KnownZero.getBitWidth();
311   computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
312   computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);
313 
314   bool isKnownNegative = false;
315   bool isKnownNonNegative = false;
316   // If the multiplication is known not to overflow, compute the sign bit.
317   if (NSW) {
318     if (Op0 == Op1) {
319       // The product of a number with itself is non-negative.
320       isKnownNonNegative = true;
321     } else {
322       bool isKnownNonNegativeOp1 = KnownZero.isNegative();
323       bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
324       bool isKnownNegativeOp1 = KnownOne.isNegative();
325       bool isKnownNegativeOp0 = KnownOne2.isNegative();
326       // The product of two numbers with the same sign is non-negative.
327       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
328         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
329       // The product of a negative number and a non-negative number is either
330       // negative or zero.
331       if (!isKnownNonNegative)
332         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
333                            isKnownNonZero(Op0, Depth, Q)) ||
334                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
335                            isKnownNonZero(Op1, Depth, Q));
336     }
337   }
338 
339   // If low bits are zero in either operand, output low known-0 bits.
340   // Also compute a conservative estimate for high known-0 bits.
341   // More trickiness is possible, but this is sufficient for the
342   // interesting case of alignment computation.
343   KnownOne.clearAllBits();
344   unsigned TrailZ = KnownZero.countTrailingOnes() +
345                     KnownZero2.countTrailingOnes();
346   unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
347                              KnownZero2.countLeadingOnes(),
348                              BitWidth) - BitWidth;
349 
350   TrailZ = std::min(TrailZ, BitWidth);
351   LeadZ = std::min(LeadZ, BitWidth);
352   KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
353               APInt::getHighBitsSet(BitWidth, LeadZ);
354 
355   // Only make use of no-wrap flags if we failed to compute the sign bit
356   // directly.  This matters if the multiplication always overflows, in
357   // which case we prefer to follow the result of the direct computation,
358   // though as the program is invoking undefined behaviour we can choose
359   // whatever we like here.
360   if (isKnownNonNegative && !KnownOne.isNegative())
361     KnownZero.setBit(BitWidth - 1);
362   else if (isKnownNegative && !KnownZero.isNegative())
363     KnownOne.setBit(BitWidth - 1);
364 }
365 
366 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
367                                              APInt &KnownZero,
368                                              APInt &KnownOne) {
369   unsigned BitWidth = KnownZero.getBitWidth();
370   unsigned NumRanges = Ranges.getNumOperands() / 2;
371   assert(NumRanges >= 1);
372 
373   KnownZero.setAllBits();
374   KnownOne.setAllBits();
375 
376   for (unsigned i = 0; i < NumRanges; ++i) {
377     ConstantInt *Lower =
378         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
379     ConstantInt *Upper =
380         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
381     ConstantRange Range(Lower->getValue(), Upper->getValue());
382 
383     // The first CommonPrefixBits of all values in Range are equal.
384     unsigned CommonPrefixBits =
385         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
386 
387     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
388     KnownOne &= Range.getUnsignedMax() & Mask;
389     KnownZero &= ~Range.getUnsignedMax() & Mask;
390   }
391 }
392 
393 static bool isEphemeralValueOf(Instruction *I, const Value *E) {
394   SmallVector<const Value *, 16> WorkSet(1, I);
395   SmallPtrSet<const Value *, 32> Visited;
396   SmallPtrSet<const Value *, 16> EphValues;
397 
398   // The instruction defining an assumption's condition itself is always
399   // considered ephemeral to that assumption (even if it has other
400   // non-ephemeral users). See r246696's test case for an example.
401   if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
402     return true;
403 
404   while (!WorkSet.empty()) {
405     const Value *V = WorkSet.pop_back_val();
406     if (!Visited.insert(V).second)
407       continue;
408 
409     // If all uses of this value are ephemeral, then so is this value.
410     if (std::all_of(V->user_begin(), V->user_end(),
411                     [&](const User *U) { return EphValues.count(U); })) {
412       if (V == E)
413         return true;
414 
415       EphValues.insert(V);
416       if (const User *U = dyn_cast<User>(V))
417         for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
418              J != JE; ++J) {
419           if (isSafeToSpeculativelyExecute(*J))
420             WorkSet.push_back(*J);
421         }
422     }
423   }
424 
425   return false;
426 }
427 
428 // Is this an intrinsic that cannot be speculated but also cannot trap?
429 static bool isAssumeLikeIntrinsic(const Instruction *I) {
430   if (const CallInst *CI = dyn_cast<CallInst>(I))
431     if (Function *F = CI->getCalledFunction())
432       switch (F->getIntrinsicID()) {
433       default: break;
434       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
435       case Intrinsic::assume:
436       case Intrinsic::dbg_declare:
437       case Intrinsic::dbg_value:
438       case Intrinsic::invariant_start:
439       case Intrinsic::invariant_end:
440       case Intrinsic::lifetime_start:
441       case Intrinsic::lifetime_end:
442       case Intrinsic::objectsize:
443       case Intrinsic::ptr_annotation:
444       case Intrinsic::var_annotation:
445         return true;
446       }
447 
448   return false;
449 }
450 
451 static bool isValidAssumeForContext(Value *V, const Instruction *CxtI,
452                                     const DominatorTree *DT) {
453   Instruction *Inv = cast<Instruction>(V);
454 
455   // There are two restrictions on the use of an assume:
456   //  1. The assume must dominate the context (or the control flow must
457   //     reach the assume whenever it reaches the context).
458   //  2. The context must not be in the assume's set of ephemeral values
459   //     (otherwise we will use the assume to prove that the condition
460   //     feeding the assume is trivially true, thus causing the removal of
461   //     the assume).
462 
463   if (DT) {
464     if (DT->dominates(Inv, CxtI)) {
465       return true;
466     } else if (Inv->getParent() == CxtI->getParent()) {
467       // The context comes first, but they're both in the same block. Make sure
468       // there is nothing in between that might interrupt the control flow.
469       for (BasicBlock::const_iterator I =
470              std::next(BasicBlock::const_iterator(CxtI)),
471                                       IE(Inv); I != IE; ++I)
472         if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
473           return false;
474 
475       return !isEphemeralValueOf(Inv, CxtI);
476     }
477 
478     return false;
479   }
480 
481   // When we don't have a DT, we do a limited search...
482   if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
483     return true;
484   } else if (Inv->getParent() == CxtI->getParent()) {
485     // Search forward from the assume until we reach the context (or the end
486     // of the block); the common case is that the assume will come first.
487     for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
488          IE = Inv->getParent()->end(); I != IE; ++I)
489       if (&*I == CxtI)
490         return true;
491 
492     // The context must come first...
493     for (BasicBlock::const_iterator I =
494            std::next(BasicBlock::const_iterator(CxtI)),
495                                     IE(Inv); I != IE; ++I)
496       if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
497         return false;
498 
499     return !isEphemeralValueOf(Inv, CxtI);
500   }
501 
502   return false;
503 }
504 
505 bool llvm::isValidAssumeForContext(const Instruction *I,
506                                    const Instruction *CxtI,
507                                    const DominatorTree *DT) {
508   return ::isValidAssumeForContext(const_cast<Instruction *>(I), CxtI, DT);
509 }
510 
511 template<typename LHS, typename RHS>
512 inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>,
513                         CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>>
514 m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
515   return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L));
516 }
517 
518 template<typename LHS, typename RHS>
519 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>,
520                         BinaryOp_match<RHS, LHS, Instruction::And>>
521 m_c_And(const LHS &L, const RHS &R) {
522   return m_CombineOr(m_And(L, R), m_And(R, L));
523 }
524 
525 template<typename LHS, typename RHS>
526 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>,
527                         BinaryOp_match<RHS, LHS, Instruction::Or>>
528 m_c_Or(const LHS &L, const RHS &R) {
529   return m_CombineOr(m_Or(L, R), m_Or(R, L));
530 }
531 
532 template<typename LHS, typename RHS>
533 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>,
534                         BinaryOp_match<RHS, LHS, Instruction::Xor>>
535 m_c_Xor(const LHS &L, const RHS &R) {
536   return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
537 }
538 
539 static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
540                                        APInt &KnownOne, unsigned Depth,
541                                        const Query &Q) {
542   // Use of assumptions is context-sensitive. If we don't have a context, we
543   // cannot use them!
544   if (!Q.AC || !Q.CxtI)
545     return;
546 
547   unsigned BitWidth = KnownZero.getBitWidth();
548 
549   for (auto &AssumeVH : Q.AC->assumptions()) {
550     if (!AssumeVH)
551       continue;
552     CallInst *I = cast<CallInst>(AssumeVH);
553     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
554            "Got assumption for the wrong function!");
555     if (Q.isExcluded(I))
556       continue;
557 
558     // Warning: This loop can end up being somewhat performance sensetive.
559     // We're running this loop for once for each value queried resulting in a
560     // runtime of ~O(#assumes * #values).
561 
562     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
563            "must be an assume intrinsic");
564 
565     Value *Arg = I->getArgOperand(0);
566 
567     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
568       assert(BitWidth == 1 && "assume operand is not i1?");
569       KnownZero.clearAllBits();
570       KnownOne.setAllBits();
571       return;
572     }
573 
574     // The remaining tests are all recursive, so bail out if we hit the limit.
575     if (Depth == MaxDepth)
576       continue;
577 
578     Value *A, *B;
579     auto m_V = m_CombineOr(m_Specific(V),
580                            m_CombineOr(m_PtrToInt(m_Specific(V)),
581                            m_BitCast(m_Specific(V))));
582 
583     CmpInst::Predicate Pred;
584     ConstantInt *C;
585     // assume(v = a)
586     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
587         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
588       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
589       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
590       KnownZero |= RHSKnownZero;
591       KnownOne  |= RHSKnownOne;
592     // assume(v & b = a)
593     } else if (match(Arg,
594                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
595                Pred == ICmpInst::ICMP_EQ &&
596                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
597       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
598       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
599       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
600       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
601 
602       // For those bits in the mask that are known to be one, we can propagate
603       // known bits from the RHS to V.
604       KnownZero |= RHSKnownZero & MaskKnownOne;
605       KnownOne  |= RHSKnownOne  & MaskKnownOne;
606     // assume(~(v & b) = a)
607     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
608                                    m_Value(A))) &&
609                Pred == ICmpInst::ICMP_EQ &&
610                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
611       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
612       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
613       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
614       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
615 
616       // For those bits in the mask that are known to be one, we can propagate
617       // inverted known bits from the RHS to V.
618       KnownZero |= RHSKnownOne  & MaskKnownOne;
619       KnownOne  |= RHSKnownZero & MaskKnownOne;
620     // assume(v | b = a)
621     } else if (match(Arg,
622                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
623                Pred == ICmpInst::ICMP_EQ &&
624                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
625       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
626       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
627       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
628       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
629 
630       // For those bits in B that are known to be zero, we can propagate known
631       // bits from the RHS to V.
632       KnownZero |= RHSKnownZero & BKnownZero;
633       KnownOne  |= RHSKnownOne  & BKnownZero;
634     // assume(~(v | b) = a)
635     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
636                                    m_Value(A))) &&
637                Pred == ICmpInst::ICMP_EQ &&
638                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
639       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
640       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
641       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
642       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
643 
644       // For those bits in B that are known to be zero, we can propagate
645       // inverted known bits from the RHS to V.
646       KnownZero |= RHSKnownOne  & BKnownZero;
647       KnownOne  |= RHSKnownZero & BKnownZero;
648     // assume(v ^ b = a)
649     } else if (match(Arg,
650                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
651                Pred == ICmpInst::ICMP_EQ &&
652                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
653       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
654       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
655       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
656       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
657 
658       // For those bits in B that are known to be zero, we can propagate known
659       // bits from the RHS to V. For those bits in B that are known to be one,
660       // we can propagate inverted known bits from the RHS to V.
661       KnownZero |= RHSKnownZero & BKnownZero;
662       KnownOne  |= RHSKnownOne  & BKnownZero;
663       KnownZero |= RHSKnownOne  & BKnownOne;
664       KnownOne  |= RHSKnownZero & BKnownOne;
665     // assume(~(v ^ b) = a)
666     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
667                                    m_Value(A))) &&
668                Pred == ICmpInst::ICMP_EQ &&
669                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
670       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
671       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
672       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
673       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
674 
675       // For those bits in B that are known to be zero, we can propagate
676       // inverted known bits from the RHS to V. For those bits in B that are
677       // known to be one, we can propagate known bits from the RHS to V.
678       KnownZero |= RHSKnownOne  & BKnownZero;
679       KnownOne  |= RHSKnownZero & BKnownZero;
680       KnownZero |= RHSKnownZero & BKnownOne;
681       KnownOne  |= RHSKnownOne  & BKnownOne;
682     // assume(v << c = a)
683     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
684                                    m_Value(A))) &&
685                Pred == ICmpInst::ICMP_EQ &&
686                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
687       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
688       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
689       // For those bits in RHS that are known, we can propagate them to known
690       // bits in V shifted to the right by C.
691       KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
692       KnownOne  |= RHSKnownOne.lshr(C->getZExtValue());
693     // assume(~(v << c) = a)
694     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
695                                    m_Value(A))) &&
696                Pred == ICmpInst::ICMP_EQ &&
697                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
698       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
699       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
700       // For those bits in RHS that are known, we can propagate them inverted
701       // to known bits in V shifted to the right by C.
702       KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
703       KnownOne  |= RHSKnownZero.lshr(C->getZExtValue());
704     // assume(v >> c = a)
705     } else if (match(Arg,
706                      m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
707                                                 m_AShr(m_V, m_ConstantInt(C))),
708                               m_Value(A))) &&
709                Pred == ICmpInst::ICMP_EQ &&
710                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
711       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
712       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
713       // For those bits in RHS that are known, we can propagate them to known
714       // bits in V shifted to the right by C.
715       KnownZero |= RHSKnownZero << C->getZExtValue();
716       KnownOne  |= RHSKnownOne  << C->getZExtValue();
717     // assume(~(v >> c) = a)
718     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
719                                              m_LShr(m_V, m_ConstantInt(C)),
720                                              m_AShr(m_V, m_ConstantInt(C)))),
721                                    m_Value(A))) &&
722                Pred == ICmpInst::ICMP_EQ &&
723                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
724       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
725       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
726       // For those bits in RHS that are known, we can propagate them inverted
727       // to known bits in V shifted to the right by C.
728       KnownZero |= RHSKnownOne  << C->getZExtValue();
729       KnownOne  |= RHSKnownZero << C->getZExtValue();
730     // assume(v >=_s c) where c is non-negative
731     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
732                Pred == ICmpInst::ICMP_SGE &&
733                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
734       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
735       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
736 
737       if (RHSKnownZero.isNegative()) {
738         // We know that the sign bit is zero.
739         KnownZero |= APInt::getSignBit(BitWidth);
740       }
741     // assume(v >_s c) where c is at least -1.
742     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
743                Pred == ICmpInst::ICMP_SGT &&
744                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
745       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
746       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
747 
748       if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
749         // We know that the sign bit is zero.
750         KnownZero |= APInt::getSignBit(BitWidth);
751       }
752     // assume(v <=_s c) where c is negative
753     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
754                Pred == ICmpInst::ICMP_SLE &&
755                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
756       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
757       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
758 
759       if (RHSKnownOne.isNegative()) {
760         // We know that the sign bit is one.
761         KnownOne |= APInt::getSignBit(BitWidth);
762       }
763     // assume(v <_s c) where c is non-positive
764     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
765                Pred == ICmpInst::ICMP_SLT &&
766                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
767       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
768       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
769 
770       if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
771         // We know that the sign bit is one.
772         KnownOne |= APInt::getSignBit(BitWidth);
773       }
774     // assume(v <=_u c)
775     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
776                Pred == ICmpInst::ICMP_ULE &&
777                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
778       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
779       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
780 
781       // Whatever high bits in c are zero are known to be zero.
782       KnownZero |=
783         APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
784     // assume(v <_u c)
785     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
786                Pred == ICmpInst::ICMP_ULT &&
787                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
788       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
789       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
790 
791       // Whatever high bits in c are zero are known to be zero (if c is a power
792       // of 2, then one more).
793       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
794         KnownZero |=
795           APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
796       else
797         KnownZero |=
798           APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
799     }
800   }
801 }
802 
803 // Compute known bits from a shift operator, including those with a
804 // non-constant shift amount. KnownZero and KnownOne are the outputs of this
805 // function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
806 // same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
807 // functors that, given the known-zero or known-one bits respectively, and a
808 // shift amount, compute the implied known-zero or known-one bits of the shift
809 // operator's result respectively for that shift amount. The results from calling
810 // KZF and KOF are conservatively combined for all permitted shift amounts.
811 template <typename KZFunctor, typename KOFunctor>
812 static void computeKnownBitsFromShiftOperator(Operator *I,
813               APInt &KnownZero, APInt &KnownOne,
814               APInt &KnownZero2, APInt &KnownOne2,
815               unsigned Depth, const Query &Q, KZFunctor KZF, KOFunctor KOF) {
816   unsigned BitWidth = KnownZero.getBitWidth();
817 
818   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
819     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
820 
821     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
822     KnownZero = KZF(KnownZero, ShiftAmt);
823     KnownOne  = KOF(KnownOne, ShiftAmt);
824     return;
825   }
826 
827   computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
828 
829   // Note: We cannot use KnownZero.getLimitedValue() here, because if
830   // BitWidth > 64 and any upper bits are known, we'll end up returning the
831   // limit value (which implies all bits are known).
832   uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
833   uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();
834 
835   // It would be more-clearly correct to use the two temporaries for this
836   // calculation. Reusing the APInts here to prevent unnecessary allocations.
837   KnownZero.clearAllBits();
838   KnownOne.clearAllBits();
839 
840   // If we know the shifter operand is nonzero, we can sometimes infer more
841   // known bits. However this is expensive to compute, so be lazy about it and
842   // only compute it when absolutely necessary.
843   Optional<bool> ShifterOperandIsNonZero;
844 
845   // Early exit if we can't constrain any well-defined shift amount.
846   if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
847     ShifterOperandIsNonZero =
848         isKnownNonZero(I->getOperand(1), Depth + 1, Q);
849     if (!*ShifterOperandIsNonZero)
850       return;
851   }
852 
853   computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
854 
855   KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
856   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
857     // Combine the shifted known input bits only for those shift amounts
858     // compatible with its known constraints.
859     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
860       continue;
861     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
862       continue;
863     // If we know the shifter is nonzero, we may be able to infer more known
864     // bits. This check is sunk down as far as possible to avoid the expensive
865     // call to isKnownNonZero if the cheaper checks above fail.
866     if (ShiftAmt == 0) {
867       if (!ShifterOperandIsNonZero.hasValue())
868         ShifterOperandIsNonZero =
869             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
870       if (*ShifterOperandIsNonZero)
871         continue;
872     }
873 
874     KnownZero &= KZF(KnownZero2, ShiftAmt);
875     KnownOne  &= KOF(KnownOne2, ShiftAmt);
876   }
877 
878   // If there are no compatible shift amounts, then we've proven that the shift
879   // amount must be >= the BitWidth, and the result is undefined. We could
880   // return anything we'd like, but we need to make sure the sets of known bits
881   // stay disjoint (it should be better for some other code to actually
882   // propagate the undef than to pick a value here using known bits).
883   if ((KnownZero & KnownOne) != 0) {
884     KnownZero.clearAllBits();
885     KnownOne.clearAllBits();
886   }
887 }
888 
889 static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
890                                          APInt &KnownOne, unsigned Depth,
891                                          const Query &Q) {
892   unsigned BitWidth = KnownZero.getBitWidth();
893 
894   APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
895   switch (I->getOpcode()) {
896   default: break;
897   case Instruction::Load:
898     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
899       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
900     break;
901   case Instruction::And: {
902     // If either the LHS or the RHS are Zero, the result is zero.
903     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
904     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
905 
906     // Output known-1 bits are only known if set in both the LHS & RHS.
907     KnownOne &= KnownOne2;
908     // Output known-0 are known to be clear if zero in either the LHS | RHS.
909     KnownZero |= KnownZero2;
910 
911     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
912     // here we handle the more general case of adding any odd number by
913     // matching the form add(x, add(x, y)) where y is odd.
914     // TODO: This could be generalized to clearing any bit set in y where the
915     // following bit is known to be unset in y.
916     Value *Y = nullptr;
917     if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
918                                       m_Value(Y))) ||
919         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
920                                       m_Value(Y)))) {
921       APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
922       computeKnownBits(Y, KnownZero3, KnownOne3, Depth + 1, Q);
923       if (KnownOne3.countTrailingOnes() > 0)
924         KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
925     }
926     break;
927   }
928   case Instruction::Or: {
929     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
930     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
931 
932     // Output known-0 bits are only known if clear in both the LHS & RHS.
933     KnownZero &= KnownZero2;
934     // Output known-1 are known to be set if set in either the LHS | RHS.
935     KnownOne |= KnownOne2;
936     break;
937   }
938   case Instruction::Xor: {
939     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
940     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
941 
942     // Output known-0 bits are known if clear or set in both the LHS & RHS.
943     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
944     // Output known-1 are known to be set if set in only one of the LHS, RHS.
945     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
946     KnownZero = KnownZeroOut;
947     break;
948   }
949   case Instruction::Mul: {
950     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
951     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
952                         KnownOne, KnownZero2, KnownOne2, Depth, Q);
953     break;
954   }
955   case Instruction::UDiv: {
956     // For the purposes of computing leading zeros we can conservatively
957     // treat a udiv as a logical right shift by the power of 2 known to
958     // be less than the denominator.
959     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
960     unsigned LeadZ = KnownZero2.countLeadingOnes();
961 
962     KnownOne2.clearAllBits();
963     KnownZero2.clearAllBits();
964     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
965     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
966     if (RHSUnknownLeadingOnes != BitWidth)
967       LeadZ = std::min(BitWidth,
968                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
969 
970     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
971     break;
972   }
973   case Instruction::Select:
974     computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
975     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
976 
977     // Only known if known in both the LHS and RHS.
978     KnownOne &= KnownOne2;
979     KnownZero &= KnownZero2;
980     break;
981   case Instruction::FPTrunc:
982   case Instruction::FPExt:
983   case Instruction::FPToUI:
984   case Instruction::FPToSI:
985   case Instruction::SIToFP:
986   case Instruction::UIToFP:
987     break; // Can't work with floating point.
988   case Instruction::PtrToInt:
989   case Instruction::IntToPtr:
990   case Instruction::AddrSpaceCast: // Pointers could be different sizes.
991     // FALL THROUGH and handle them the same as zext/trunc.
992   case Instruction::ZExt:
993   case Instruction::Trunc: {
994     Type *SrcTy = I->getOperand(0)->getType();
995 
996     unsigned SrcBitWidth;
997     // Note that we handle pointer operands here because of inttoptr/ptrtoint
998     // which fall through here.
999     SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
1000 
1001     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1002     KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
1003     KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
1004     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1005     KnownZero = KnownZero.zextOrTrunc(BitWidth);
1006     KnownOne = KnownOne.zextOrTrunc(BitWidth);
1007     // Any top bits are known to be zero.
1008     if (BitWidth > SrcBitWidth)
1009       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1010     break;
1011   }
1012   case Instruction::BitCast: {
1013     Type *SrcTy = I->getOperand(0)->getType();
1014     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy() ||
1015          SrcTy->isFloatingPointTy()) &&
1016         // TODO: For now, not handling conversions like:
1017         // (bitcast i64 %x to <2 x i32>)
1018         !I->getType()->isVectorTy()) {
1019       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1020       break;
1021     }
1022     break;
1023   }
1024   case Instruction::SExt: {
1025     // Compute the bits in the result that are not present in the input.
1026     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1027 
1028     KnownZero = KnownZero.trunc(SrcBitWidth);
1029     KnownOne = KnownOne.trunc(SrcBitWidth);
1030     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1031     KnownZero = KnownZero.zext(BitWidth);
1032     KnownOne = KnownOne.zext(BitWidth);
1033 
1034     // If the sign bit of the input is known set or clear, then we know the
1035     // top bits of the result.
1036     if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
1037       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1038     else if (KnownOne[SrcBitWidth-1])           // Input sign bit known set
1039       KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1040     break;
1041   }
1042   case Instruction::Shl: {
1043     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1044     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1045       return (KnownZero << ShiftAmt) |
1046              APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
1047     };
1048 
1049     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1050       return KnownOne << ShiftAmt;
1051     };
1052 
1053     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1054                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1055                                       KOF);
1056     break;
1057   }
1058   case Instruction::LShr: {
1059     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1060     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1061       return APIntOps::lshr(KnownZero, ShiftAmt) |
1062              // High bits known zero.
1063              APInt::getHighBitsSet(BitWidth, ShiftAmt);
1064     };
1065 
1066     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1067       return APIntOps::lshr(KnownOne, ShiftAmt);
1068     };
1069 
1070     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1071                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1072                                       KOF);
1073     break;
1074   }
1075   case Instruction::AShr: {
1076     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1077     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1078       return APIntOps::ashr(KnownZero, ShiftAmt);
1079     };
1080 
1081     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1082       return APIntOps::ashr(KnownOne, ShiftAmt);
1083     };
1084 
1085     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1086                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1087                                       KOF);
1088     break;
1089   }
1090   case Instruction::Sub: {
1091     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1092     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1093                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1094                            Q);
1095     break;
1096   }
1097   case Instruction::Add: {
1098     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1099     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1100                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1101                            Q);
1102     break;
1103   }
1104   case Instruction::SRem:
1105     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1106       APInt RA = Rem->getValue().abs();
1107       if (RA.isPowerOf2()) {
1108         APInt LowBits = RA - 1;
1109         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
1110                          Q);
1111 
1112         // The low bits of the first operand are unchanged by the srem.
1113         KnownZero = KnownZero2 & LowBits;
1114         KnownOne = KnownOne2 & LowBits;
1115 
1116         // If the first operand is non-negative or has all low bits zero, then
1117         // the upper bits are all zero.
1118         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
1119           KnownZero |= ~LowBits;
1120 
1121         // If the first operand is negative and not all low bits are zero, then
1122         // the upper bits are all one.
1123         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
1124           KnownOne |= ~LowBits;
1125 
1126         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1127       }
1128     }
1129 
1130     // The sign bit is the LHS's sign bit, except when the result of the
1131     // remainder is zero.
1132     if (KnownZero.isNonNegative()) {
1133       APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
1134       computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
1135                        Q);
1136       // If it's known zero, our sign bit is also zero.
1137       if (LHSKnownZero.isNegative())
1138         KnownZero.setBit(BitWidth - 1);
1139     }
1140 
1141     break;
1142   case Instruction::URem: {
1143     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1144       APInt RA = Rem->getValue();
1145       if (RA.isPowerOf2()) {
1146         APInt LowBits = (RA - 1);
1147         computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1148         KnownZero |= ~LowBits;
1149         KnownOne &= LowBits;
1150         break;
1151       }
1152     }
1153 
1154     // Since the result is less than or equal to either operand, any leading
1155     // zero bits in either operand must also exist in the result.
1156     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1157     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
1158 
1159     unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
1160                                 KnownZero2.countLeadingOnes());
1161     KnownOne.clearAllBits();
1162     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
1163     break;
1164   }
1165 
1166   case Instruction::Alloca: {
1167     AllocaInst *AI = cast<AllocaInst>(I);
1168     unsigned Align = AI->getAlignment();
1169     if (Align == 0)
1170       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1171 
1172     if (Align > 0)
1173       KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1174     break;
1175   }
1176   case Instruction::GetElementPtr: {
1177     // Analyze all of the subscripts of this getelementptr instruction
1178     // to determine if we can prove known low zero bits.
1179     APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
1180     computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
1181                      Q);
1182     unsigned TrailZ = LocalKnownZero.countTrailingOnes();
1183 
1184     gep_type_iterator GTI = gep_type_begin(I);
1185     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1186       Value *Index = I->getOperand(i);
1187       if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1188         // Handle struct member offset arithmetic.
1189 
1190         // Handle case when index is vector zeroinitializer
1191         Constant *CIndex = cast<Constant>(Index);
1192         if (CIndex->isZeroValue())
1193           continue;
1194 
1195         if (CIndex->getType()->isVectorTy())
1196           Index = CIndex->getSplatValue();
1197 
1198         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1199         const StructLayout *SL = Q.DL.getStructLayout(STy);
1200         uint64_t Offset = SL->getElementOffset(Idx);
1201         TrailZ = std::min<unsigned>(TrailZ,
1202                                     countTrailingZeros(Offset));
1203       } else {
1204         // Handle array index arithmetic.
1205         Type *IndexedTy = GTI.getIndexedType();
1206         if (!IndexedTy->isSized()) {
1207           TrailZ = 0;
1208           break;
1209         }
1210         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1211         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1212         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
1213         computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
1214         TrailZ = std::min(TrailZ,
1215                           unsigned(countTrailingZeros(TypeSize) +
1216                                    LocalKnownZero.countTrailingOnes()));
1217       }
1218     }
1219 
1220     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
1221     break;
1222   }
1223   case Instruction::PHI: {
1224     PHINode *P = cast<PHINode>(I);
1225     // Handle the case of a simple two-predecessor recurrence PHI.
1226     // There's a lot more that could theoretically be done here, but
1227     // this is sufficient to catch some interesting cases.
1228     if (P->getNumIncomingValues() == 2) {
1229       for (unsigned i = 0; i != 2; ++i) {
1230         Value *L = P->getIncomingValue(i);
1231         Value *R = P->getIncomingValue(!i);
1232         Operator *LU = dyn_cast<Operator>(L);
1233         if (!LU)
1234           continue;
1235         unsigned Opcode = LU->getOpcode();
1236         // Check for operations that have the property that if
1237         // both their operands have low zero bits, the result
1238         // will have low zero bits.
1239         if (Opcode == Instruction::Add ||
1240             Opcode == Instruction::Sub ||
1241             Opcode == Instruction::And ||
1242             Opcode == Instruction::Or ||
1243             Opcode == Instruction::Mul) {
1244           Value *LL = LU->getOperand(0);
1245           Value *LR = LU->getOperand(1);
1246           // Find a recurrence.
1247           if (LL == I)
1248             L = LR;
1249           else if (LR == I)
1250             L = LL;
1251           else
1252             break;
1253           // Ok, we have a PHI of the form L op= R. Check for low
1254           // zero bits.
1255           computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);
1256 
1257           // We need to take the minimum number of known bits
1258           APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
1259           computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);
1260 
1261           KnownZero = APInt::getLowBitsSet(BitWidth,
1262                                            std::min(KnownZero2.countTrailingOnes(),
1263                                                     KnownZero3.countTrailingOnes()));
1264           break;
1265         }
1266       }
1267     }
1268 
1269     // Unreachable blocks may have zero-operand PHI nodes.
1270     if (P->getNumIncomingValues() == 0)
1271       break;
1272 
1273     // Otherwise take the unions of the known bit sets of the operands,
1274     // taking conservative care to avoid excessive recursion.
1275     if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
1276       // Skip if every incoming value references to ourself.
1277       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1278         break;
1279 
1280       KnownZero = APInt::getAllOnesValue(BitWidth);
1281       KnownOne = APInt::getAllOnesValue(BitWidth);
1282       for (Value *IncValue : P->incoming_values()) {
1283         // Skip direct self references.
1284         if (IncValue == P) continue;
1285 
1286         KnownZero2 = APInt(BitWidth, 0);
1287         KnownOne2 = APInt(BitWidth, 0);
1288         // Recurse, but cap the recursion to one level, because we don't
1289         // want to waste time spinning around in loops.
1290         computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
1291         KnownZero &= KnownZero2;
1292         KnownOne &= KnownOne2;
1293         // If all bits have been ruled out, there's no need to check
1294         // more operands.
1295         if (!KnownZero && !KnownOne)
1296           break;
1297       }
1298     }
1299     break;
1300   }
1301   case Instruction::Call:
1302   case Instruction::Invoke:
1303     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1304       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
1305     // If a range metadata is attached to this IntrinsicInst, intersect the
1306     // explicit range specified by the metadata and the implicit range of
1307     // the intrinsic.
1308     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1309       switch (II->getIntrinsicID()) {
1310       default: break;
1311       case Intrinsic::bswap:
1312         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1313         KnownZero |= KnownZero2.byteSwap();
1314         KnownOne |= KnownOne2.byteSwap();
1315         break;
1316       case Intrinsic::ctlz:
1317       case Intrinsic::cttz: {
1318         unsigned LowBits = Log2_32(BitWidth)+1;
1319         // If this call is undefined for 0, the result will be less than 2^n.
1320         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1321           LowBits -= 1;
1322         KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1323         break;
1324       }
1325       case Intrinsic::ctpop: {
1326         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1327         // We can bound the space the count needs.  Also, bits known to be zero
1328         // can't contribute to the population.
1329         unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
1330         unsigned LeadingZeros =
1331           APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
1332         assert(LeadingZeros <= BitWidth);
1333         KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
1334         KnownOne &= ~KnownZero;
1335         // TODO: we could bound KnownOne using the lower bound on the number
1336         // of bits which might be set provided by popcnt KnownOne2.
1337         break;
1338       }
1339       case Intrinsic::fabs: {
1340         Type *Ty = II->getType();
1341         APInt SignBit = APInt::getSignBit(Ty->getScalarSizeInBits());
1342         KnownZero |= APInt::getSplat(Ty->getPrimitiveSizeInBits(), SignBit);
1343         break;
1344       }
1345       case Intrinsic::x86_sse42_crc32_64_64:
1346         KnownZero |= APInt::getHighBitsSet(64, 32);
1347         break;
1348       }
1349     }
1350     break;
1351   case Instruction::ExtractValue:
1352     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1353       ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1354       if (EVI->getNumIndices() != 1) break;
1355       if (EVI->getIndices()[0] == 0) {
1356         switch (II->getIntrinsicID()) {
1357         default: break;
1358         case Intrinsic::uadd_with_overflow:
1359         case Intrinsic::sadd_with_overflow:
1360           computeKnownBitsAddSub(true, II->getArgOperand(0),
1361                                  II->getArgOperand(1), false, KnownZero,
1362                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1363           break;
1364         case Intrinsic::usub_with_overflow:
1365         case Intrinsic::ssub_with_overflow:
1366           computeKnownBitsAddSub(false, II->getArgOperand(0),
1367                                  II->getArgOperand(1), false, KnownZero,
1368                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1369           break;
1370         case Intrinsic::umul_with_overflow:
1371         case Intrinsic::smul_with_overflow:
1372           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1373                               KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1374                               Q);
1375           break;
1376         }
1377       }
1378     }
1379   }
1380 }
1381 
1382 /// Determine which bits of V are known to be either zero or one and return
1383 /// them in the KnownZero/KnownOne bit sets.
1384 ///
1385 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1386 /// we cannot optimize based on the assumption that it is zero without changing
1387 /// it to be an explicit zero.  If we don't change it to zero, other code could
1388 /// optimized based on the contradictory assumption that it is non-zero.
1389 /// Because instcombine aggressively folds operations with undef args anyway,
1390 /// this won't lose us code quality.
1391 ///
1392 /// This function is defined on values with integer type, values with pointer
1393 /// type, and vectors of integers.  In the case
1394 /// where V is a vector, known zero, and known one values are the
1395 /// same width as the vector element, and the bit is set only if it is true
1396 /// for all of the elements in the vector.
1397 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
1398                       unsigned Depth, const Query &Q) {
1399   assert(V && "No Value?");
1400   assert(Depth <= MaxDepth && "Limit Search Depth");
1401   unsigned BitWidth = KnownZero.getBitWidth();
1402 
1403   assert((V->getType()->isIntOrIntVectorTy() ||
1404           V->getType()->isFPOrFPVectorTy() ||
1405           V->getType()->getScalarType()->isPointerTy()) &&
1406          "Not integer, floating point, or pointer type!");
1407   assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1408          (!V->getType()->isIntOrIntVectorTy() ||
1409           V->getType()->getScalarSizeInBits() == BitWidth) &&
1410          KnownZero.getBitWidth() == BitWidth &&
1411          KnownOne.getBitWidth() == BitWidth &&
1412          "V, KnownOne and KnownZero should have same BitWidth");
1413 
1414   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1415     // We know all of the bits for a constant!
1416     KnownOne = CI->getValue();
1417     KnownZero = ~KnownOne;
1418     return;
1419   }
1420   // Null and aggregate-zero are all-zeros.
1421   if (isa<ConstantPointerNull>(V) ||
1422       isa<ConstantAggregateZero>(V)) {
1423     KnownOne.clearAllBits();
1424     KnownZero = APInt::getAllOnesValue(BitWidth);
1425     return;
1426   }
1427   // Handle a constant vector by taking the intersection of the known bits of
1428   // each element.  There is no real need to handle ConstantVector here, because
1429   // we don't handle undef in any particularly useful way.
1430   if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1431     // We know that CDS must be a vector of integers. Take the intersection of
1432     // each element.
1433     KnownZero.setAllBits(); KnownOne.setAllBits();
1434     APInt Elt(KnownZero.getBitWidth(), 0);
1435     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1436       Elt = CDS->getElementAsInteger(i);
1437       KnownZero &= ~Elt;
1438       KnownOne &= Elt;
1439     }
1440     return;
1441   }
1442 
1443   // Start out not knowing anything.
1444   KnownZero.clearAllBits(); KnownOne.clearAllBits();
1445 
1446   // Limit search depth.
1447   // All recursive calls that increase depth must come after this.
1448   if (Depth == MaxDepth)
1449     return;
1450 
1451   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1452   // the bits of its aliasee.
1453   if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1454     if (!GA->isInterposable())
1455       computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
1456     return;
1457   }
1458 
1459   if (Operator *I = dyn_cast<Operator>(V))
1460     computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
1461 
1462   // Aligned pointers have trailing zeros - refine KnownZero set
1463   if (V->getType()->isPointerTy()) {
1464     unsigned Align = V->getPointerAlignment(Q.DL);
1465     if (Align)
1466       KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1467   }
1468 
1469   // computeKnownBitsFromAssume strictly refines KnownZero and
1470   // KnownOne. Therefore, we run them after computeKnownBitsFromOperator.
1471 
1472   // Check whether a nearby assume intrinsic can determine some known bits.
1473   computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);
1474 
1475   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1476 }
1477 
1478 /// Determine whether the sign bit is known to be zero or one.
1479 /// Convenience wrapper around computeKnownBits.
1480 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
1481                     unsigned Depth, const Query &Q) {
1482   unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
1483   if (!BitWidth) {
1484     KnownZero = false;
1485     KnownOne = false;
1486     return;
1487   }
1488   APInt ZeroBits(BitWidth, 0);
1489   APInt OneBits(BitWidth, 0);
1490   computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
1491   KnownOne = OneBits[BitWidth - 1];
1492   KnownZero = ZeroBits[BitWidth - 1];
1493 }
1494 
1495 /// Return true if the given value is known to have exactly one
1496 /// bit set when defined. For vectors return true if every element is known to
1497 /// be a power of two when defined. Supports values with integer or pointer
1498 /// types and vectors of integers.
1499 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
1500                             const Query &Q) {
1501   if (Constant *C = dyn_cast<Constant>(V)) {
1502     if (C->isNullValue())
1503       return OrZero;
1504     if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1505       return CI->getValue().isPowerOf2();
1506     // TODO: Handle vector constants.
1507   }
1508 
1509   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1510   // it is shifted off the end then the result is undefined.
1511   if (match(V, m_Shl(m_One(), m_Value())))
1512     return true;
1513 
1514   // (signbit) >>l X is clearly a power of two if the one is not shifted off the
1515   // bottom.  If it is shifted off the bottom then the result is undefined.
1516   if (match(V, m_LShr(m_SignBit(), m_Value())))
1517     return true;
1518 
1519   // The remaining tests are all recursive, so bail out if we hit the limit.
1520   if (Depth++ == MaxDepth)
1521     return false;
1522 
1523   Value *X = nullptr, *Y = nullptr;
1524   // A shift left or a logical shift right of a power of two is a power of two
1525   // or zero.
1526   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1527                  match(V, m_LShr(m_Value(X), m_Value()))))
1528     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1529 
1530   if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1531     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1532 
1533   if (SelectInst *SI = dyn_cast<SelectInst>(V))
1534     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1535            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1536 
1537   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1538     // A power of two and'd with anything is a power of two or zero.
1539     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1540         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1541       return true;
1542     // X & (-X) is always a power of two or zero.
1543     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1544       return true;
1545     return false;
1546   }
1547 
1548   // Adding a power-of-two or zero to the same power-of-two or zero yields
1549   // either the original power-of-two, a larger power-of-two or zero.
1550   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1551     OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1552     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1553       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1554           match(X, m_And(m_Value(), m_Specific(Y))))
1555         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1556           return true;
1557       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1558           match(Y, m_And(m_Value(), m_Specific(X))))
1559         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1560           return true;
1561 
1562       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1563       APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1564       computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);
1565 
1566       APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1567       computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
1568       // If i8 V is a power of two or zero:
1569       //  ZeroBits: 1 1 1 0 1 1 1 1
1570       // ~ZeroBits: 0 0 0 1 0 0 0 0
1571       if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1572         // If OrZero isn't set, we cannot give back a zero result.
1573         // Make sure either the LHS or RHS has a bit set.
1574         if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1575           return true;
1576     }
1577   }
1578 
1579   // An exact divide or right shift can only shift off zero bits, so the result
1580   // is a power of two only if the first operand is a power of two and not
1581   // copying a sign bit (sdiv int_min, 2).
1582   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1583       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1584     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1585                                   Depth, Q);
1586   }
1587 
1588   return false;
1589 }
1590 
1591 /// \brief Test whether a GEP's result is known to be non-null.
1592 ///
1593 /// Uses properties inherent in a GEP to try to determine whether it is known
1594 /// to be non-null.
1595 ///
1596 /// Currently this routine does not support vector GEPs.
1597 static bool isGEPKnownNonNull(GEPOperator *GEP, unsigned Depth,
1598                               const Query &Q) {
1599   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1600     return false;
1601 
1602   // FIXME: Support vector-GEPs.
1603   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1604 
1605   // If the base pointer is non-null, we cannot walk to a null address with an
1606   // inbounds GEP in address space zero.
1607   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1608     return true;
1609 
1610   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1611   // If so, then the GEP cannot produce a null pointer, as doing so would
1612   // inherently violate the inbounds contract within address space zero.
1613   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1614        GTI != GTE; ++GTI) {
1615     // Struct types are easy -- they must always be indexed by a constant.
1616     if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1617       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1618       unsigned ElementIdx = OpC->getZExtValue();
1619       const StructLayout *SL = Q.DL.getStructLayout(STy);
1620       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1621       if (ElementOffset > 0)
1622         return true;
1623       continue;
1624     }
1625 
1626     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1627     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1628       continue;
1629 
1630     // Fast path the constant operand case both for efficiency and so we don't
1631     // increment Depth when just zipping down an all-constant GEP.
1632     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1633       if (!OpC->isZero())
1634         return true;
1635       continue;
1636     }
1637 
1638     // We post-increment Depth here because while isKnownNonZero increments it
1639     // as well, when we pop back up that increment won't persist. We don't want
1640     // to recurse 10k times just because we have 10k GEP operands. We don't
1641     // bail completely out because we want to handle constant GEPs regardless
1642     // of depth.
1643     if (Depth++ >= MaxDepth)
1644       continue;
1645 
1646     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1647       return true;
1648   }
1649 
1650   return false;
1651 }
1652 
1653 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1654 /// ensure that the value it's attached to is never Value?  'RangeType' is
1655 /// is the type of the value described by the range.
1656 static bool rangeMetadataExcludesValue(MDNode* Ranges,
1657                                        const APInt& Value) {
1658   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1659   assert(NumRanges >= 1);
1660   for (unsigned i = 0; i < NumRanges; ++i) {
1661     ConstantInt *Lower =
1662         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1663     ConstantInt *Upper =
1664         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1665     ConstantRange Range(Lower->getValue(), Upper->getValue());
1666     if (Range.contains(Value))
1667       return false;
1668   }
1669   return true;
1670 }
1671 
1672 /// Return true if the given value is known to be non-zero when defined.
1673 /// For vectors return true if every element is known to be non-zero when
1674 /// defined. Supports values with integer or pointer type and vectors of
1675 /// integers.
1676 bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q) {
1677   if (Constant *C = dyn_cast<Constant>(V)) {
1678     if (C->isNullValue())
1679       return false;
1680     if (isa<ConstantInt>(C))
1681       // Must be non-zero due to null test above.
1682       return true;
1683     // TODO: Handle vectors
1684     return false;
1685   }
1686 
1687   if (Instruction* I = dyn_cast<Instruction>(V)) {
1688     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1689       // If the possible ranges don't contain zero, then the value is
1690       // definitely non-zero.
1691       if (IntegerType* Ty = dyn_cast<IntegerType>(V->getType())) {
1692         const APInt ZeroValue(Ty->getBitWidth(), 0);
1693         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1694           return true;
1695       }
1696     }
1697   }
1698 
1699   // The remaining tests are all recursive, so bail out if we hit the limit.
1700   if (Depth++ >= MaxDepth)
1701     return false;
1702 
1703   // Check for pointer simplifications.
1704   if (V->getType()->isPointerTy()) {
1705     if (isKnownNonNull(V))
1706       return true;
1707     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1708       if (isGEPKnownNonNull(GEP, Depth, Q))
1709         return true;
1710   }
1711 
1712   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1713 
1714   // X | Y != 0 if X != 0 or Y != 0.
1715   Value *X = nullptr, *Y = nullptr;
1716   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1717     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1718 
1719   // ext X != 0 if X != 0.
1720   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1721     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1722 
1723   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1724   // if the lowest bit is shifted off the end.
1725   if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1726     // shl nuw can't remove any non-zero bits.
1727     OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1728     if (BO->hasNoUnsignedWrap())
1729       return isKnownNonZero(X, Depth, Q);
1730 
1731     APInt KnownZero(BitWidth, 0);
1732     APInt KnownOne(BitWidth, 0);
1733     computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1734     if (KnownOne[0])
1735       return true;
1736   }
1737   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1738   // defined if the sign bit is shifted off the end.
1739   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1740     // shr exact can only shift out zero bits.
1741     PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1742     if (BO->isExact())
1743       return isKnownNonZero(X, Depth, Q);
1744 
1745     bool XKnownNonNegative, XKnownNegative;
1746     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1747     if (XKnownNegative)
1748       return true;
1749 
1750     // If the shifter operand is a constant, and all of the bits shifted
1751     // out are known to be zero, and X is known non-zero then at least one
1752     // non-zero bit must remain.
1753     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1754       APInt KnownZero(BitWidth, 0);
1755       APInt KnownOne(BitWidth, 0);
1756       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1757 
1758       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1759       // Is there a known one in the portion not shifted out?
1760       if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1761         return true;
1762       // Are all the bits to be shifted out known zero?
1763       if (KnownZero.countTrailingOnes() >= ShiftVal)
1764         return isKnownNonZero(X, Depth, Q);
1765     }
1766   }
1767   // div exact can only produce a zero if the dividend is zero.
1768   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1769     return isKnownNonZero(X, Depth, Q);
1770   }
1771   // X + Y.
1772   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1773     bool XKnownNonNegative, XKnownNegative;
1774     bool YKnownNonNegative, YKnownNegative;
1775     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1776     ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);
1777 
1778     // If X and Y are both non-negative (as signed values) then their sum is not
1779     // zero unless both X and Y are zero.
1780     if (XKnownNonNegative && YKnownNonNegative)
1781       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1782         return true;
1783 
1784     // If X and Y are both negative (as signed values) then their sum is not
1785     // zero unless both X and Y equal INT_MIN.
1786     if (BitWidth && XKnownNegative && YKnownNegative) {
1787       APInt KnownZero(BitWidth, 0);
1788       APInt KnownOne(BitWidth, 0);
1789       APInt Mask = APInt::getSignedMaxValue(BitWidth);
1790       // The sign bit of X is set.  If some other bit is set then X is not equal
1791       // to INT_MIN.
1792       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1793       if ((KnownOne & Mask) != 0)
1794         return true;
1795       // The sign bit of Y is set.  If some other bit is set then Y is not equal
1796       // to INT_MIN.
1797       computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
1798       if ((KnownOne & Mask) != 0)
1799         return true;
1800     }
1801 
1802     // The sum of a non-negative number and a power of two is not zero.
1803     if (XKnownNonNegative &&
1804         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1805       return true;
1806     if (YKnownNonNegative &&
1807         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1808       return true;
1809   }
1810   // X * Y.
1811   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1812     OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1813     // If X and Y are non-zero then so is X * Y as long as the multiplication
1814     // does not overflow.
1815     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1816         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1817       return true;
1818   }
1819   // (C ? X : Y) != 0 if X != 0 and Y != 0.
1820   else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1821     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1822         isKnownNonZero(SI->getFalseValue(), Depth, Q))
1823       return true;
1824   }
1825   // PHI
1826   else if (PHINode *PN = dyn_cast<PHINode>(V)) {
1827     // Try and detect a recurrence that monotonically increases from a
1828     // starting value, as these are common as induction variables.
1829     if (PN->getNumIncomingValues() == 2) {
1830       Value *Start = PN->getIncomingValue(0);
1831       Value *Induction = PN->getIncomingValue(1);
1832       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1833         std::swap(Start, Induction);
1834       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1835         if (!C->isZero() && !C->isNegative()) {
1836           ConstantInt *X;
1837           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1838                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1839               !X->isNegative())
1840             return true;
1841         }
1842       }
1843     }
1844     // Check if all incoming values are non-zero constant.
1845     bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1846       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1847     });
1848     if (AllNonZeroConstants)
1849       return true;
1850   }
1851 
1852   if (!BitWidth) return false;
1853   APInt KnownZero(BitWidth, 0);
1854   APInt KnownOne(BitWidth, 0);
1855   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1856   return KnownOne != 0;
1857 }
1858 
1859 /// Return true if V2 == V1 + X, where X is known non-zero.
1860 static bool isAddOfNonZero(Value *V1, Value *V2, const Query &Q) {
1861   BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
1862   if (!BO || BO->getOpcode() != Instruction::Add)
1863     return false;
1864   Value *Op = nullptr;
1865   if (V2 == BO->getOperand(0))
1866     Op = BO->getOperand(1);
1867   else if (V2 == BO->getOperand(1))
1868     Op = BO->getOperand(0);
1869   else
1870     return false;
1871   return isKnownNonZero(Op, 0, Q);
1872 }
1873 
1874 /// Return true if it is known that V1 != V2.
1875 static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q) {
1876   if (V1->getType()->isVectorTy() || V1 == V2)
1877     return false;
1878   if (V1->getType() != V2->getType())
1879     // We can't look through casts yet.
1880     return false;
1881   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
1882     return true;
1883 
1884   if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
1885     // Are any known bits in V1 contradictory to known bits in V2? If V1
1886     // has a known zero where V2 has a known one, they must not be equal.
1887     auto BitWidth = Ty->getBitWidth();
1888     APInt KnownZero1(BitWidth, 0);
1889     APInt KnownOne1(BitWidth, 0);
1890     computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
1891     APInt KnownZero2(BitWidth, 0);
1892     APInt KnownOne2(BitWidth, 0);
1893     computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);
1894 
1895     auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
1896     if (OppositeBits.getBoolValue())
1897       return true;
1898   }
1899   return false;
1900 }
1901 
1902 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
1903 /// simplify operations downstream. Mask is known to be zero for bits that V
1904 /// cannot have.
1905 ///
1906 /// This function is defined on values with integer type, values with pointer
1907 /// type, and vectors of integers.  In the case
1908 /// where V is a vector, the mask, known zero, and known one values are the
1909 /// same width as the vector element, and the bit is set only if it is true
1910 /// for all of the elements in the vector.
1911 bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
1912                        const Query &Q) {
1913   APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
1914   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1915   return (KnownZero & Mask) == Mask;
1916 }
1917 
1918 
1919 
1920 /// Return the number of times the sign bit of the register is replicated into
1921 /// the other bits. We know that at least 1 bit is always equal to the sign bit
1922 /// (itself), but other cases can give us information. For example, immediately
1923 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
1924 /// other, so we return 3.
1925 ///
1926 /// 'Op' must have a scalar integer type.
1927 ///
1928 unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q) {
1929   unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
1930   unsigned Tmp, Tmp2;
1931   unsigned FirstAnswer = 1;
1932 
1933   // Note that ConstantInt is handled by the general computeKnownBits case
1934   // below.
1935 
1936   if (Depth == 6)
1937     return 1;  // Limit search depth.
1938 
1939   Operator *U = dyn_cast<Operator>(V);
1940   switch (Operator::getOpcode(V)) {
1941   default: break;
1942   case Instruction::SExt:
1943     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
1944     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
1945 
1946   case Instruction::SDiv: {
1947     const APInt *Denominator;
1948     // sdiv X, C -> adds log(C) sign bits.
1949     if (match(U->getOperand(1), m_APInt(Denominator))) {
1950 
1951       // Ignore non-positive denominator.
1952       if (!Denominator->isStrictlyPositive())
1953         break;
1954 
1955       // Calculate the incoming numerator bits.
1956       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
1957 
1958       // Add floor(log(C)) bits to the numerator bits.
1959       return std::min(TyBits, NumBits + Denominator->logBase2());
1960     }
1961     break;
1962   }
1963 
1964   case Instruction::SRem: {
1965     const APInt *Denominator;
1966     // srem X, C -> we know that the result is within [-C+1,C) when C is a
1967     // positive constant.  This let us put a lower bound on the number of sign
1968     // bits.
1969     if (match(U->getOperand(1), m_APInt(Denominator))) {
1970 
1971       // Ignore non-positive denominator.
1972       if (!Denominator->isStrictlyPositive())
1973         break;
1974 
1975       // Calculate the incoming numerator bits. SRem by a positive constant
1976       // can't lower the number of sign bits.
1977       unsigned NumrBits =
1978           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
1979 
1980       // Calculate the leading sign bit constraints by examining the
1981       // denominator.  Given that the denominator is positive, there are two
1982       // cases:
1983       //
1984       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
1985       //     (1 << ceilLogBase2(C)).
1986       //
1987       //  2. the numerator is negative.  Then the result range is (-C,0] and
1988       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
1989       //
1990       // Thus a lower bound on the number of sign bits is `TyBits -
1991       // ceilLogBase2(C)`.
1992 
1993       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
1994       return std::max(NumrBits, ResBits);
1995     }
1996     break;
1997   }
1998 
1999   case Instruction::AShr: {
2000     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2001     // ashr X, C   -> adds C sign bits.  Vectors too.
2002     const APInt *ShAmt;
2003     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2004       Tmp += ShAmt->getZExtValue();
2005       if (Tmp > TyBits) Tmp = TyBits;
2006     }
2007     return Tmp;
2008   }
2009   case Instruction::Shl: {
2010     const APInt *ShAmt;
2011     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2012       // shl destroys sign bits.
2013       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2014       Tmp2 = ShAmt->getZExtValue();
2015       if (Tmp2 >= TyBits ||      // Bad shift.
2016           Tmp2 >= Tmp) break;    // Shifted all sign bits out.
2017       return Tmp - Tmp2;
2018     }
2019     break;
2020   }
2021   case Instruction::And:
2022   case Instruction::Or:
2023   case Instruction::Xor:    // NOT is handled here.
2024     // Logical binary ops preserve the number of sign bits at the worst.
2025     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2026     if (Tmp != 1) {
2027       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2028       FirstAnswer = std::min(Tmp, Tmp2);
2029       // We computed what we know about the sign bits as our first
2030       // answer. Now proceed to the generic code that uses
2031       // computeKnownBits, and pick whichever answer is better.
2032     }
2033     break;
2034 
2035   case Instruction::Select:
2036     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2037     if (Tmp == 1) return 1;  // Early out.
2038     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2039     return std::min(Tmp, Tmp2);
2040 
2041   case Instruction::Add:
2042     // Add can have at most one carry bit.  Thus we know that the output
2043     // is, at worst, one more bit than the inputs.
2044     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2045     if (Tmp == 1) return 1;  // Early out.
2046 
2047     // Special case decrementing a value (ADD X, -1):
2048     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2049       if (CRHS->isAllOnesValue()) {
2050         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2051         computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
2052 
2053         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2054         // sign bits set.
2055         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2056           return TyBits;
2057 
2058         // If we are subtracting one from a positive number, there is no carry
2059         // out of the result.
2060         if (KnownZero.isNegative())
2061           return Tmp;
2062       }
2063 
2064     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2065     if (Tmp2 == 1) return 1;
2066     return std::min(Tmp, Tmp2)-1;
2067 
2068   case Instruction::Sub:
2069     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2070     if (Tmp2 == 1) return 1;
2071 
2072     // Handle NEG.
2073     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2074       if (CLHS->isNullValue()) {
2075         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2076         computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
2077         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2078         // sign bits set.
2079         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2080           return TyBits;
2081 
2082         // If the input is known to be positive (the sign bit is known clear),
2083         // the output of the NEG has the same number of sign bits as the input.
2084         if (KnownZero.isNegative())
2085           return Tmp2;
2086 
2087         // Otherwise, we treat this like a SUB.
2088       }
2089 
2090     // Sub can have at most one carry bit.  Thus we know that the output
2091     // is, at worst, one more bit than the inputs.
2092     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2093     if (Tmp == 1) return 1;  // Early out.
2094     return std::min(Tmp, Tmp2)-1;
2095 
2096   case Instruction::PHI: {
2097     PHINode *PN = cast<PHINode>(U);
2098     unsigned NumIncomingValues = PN->getNumIncomingValues();
2099     // Don't analyze large in-degree PHIs.
2100     if (NumIncomingValues > 4) break;
2101     // Unreachable blocks may have zero-operand PHI nodes.
2102     if (NumIncomingValues == 0) break;
2103 
2104     // Take the minimum of all incoming values.  This can't infinitely loop
2105     // because of our depth threshold.
2106     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2107     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2108       if (Tmp == 1) return Tmp;
2109       Tmp = std::min(
2110           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2111     }
2112     return Tmp;
2113   }
2114 
2115   case Instruction::Trunc:
2116     // FIXME: it's tricky to do anything useful for this, but it is an important
2117     // case for targets like X86.
2118     break;
2119   }
2120 
2121   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2122   // use this information.
2123   APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2124   APInt Mask;
2125   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2126 
2127   if (KnownZero.isNegative()) {        // sign bit is 0
2128     Mask = KnownZero;
2129   } else if (KnownOne.isNegative()) {  // sign bit is 1;
2130     Mask = KnownOne;
2131   } else {
2132     // Nothing known.
2133     return FirstAnswer;
2134   }
2135 
2136   // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
2137   // the number of identical bits in the top of the input value.
2138   Mask = ~Mask;
2139   Mask <<= Mask.getBitWidth()-TyBits;
2140   // Return # leading zeros.  We use 'min' here in case Val was zero before
2141   // shifting.  We don't want to return '64' as for an i32 "0".
2142   return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
2143 }
2144 
2145 /// This function computes the integer multiple of Base that equals V.
2146 /// If successful, it returns true and returns the multiple in
2147 /// Multiple. If unsuccessful, it returns false. It looks
2148 /// through SExt instructions only if LookThroughSExt is true.
2149 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2150                            bool LookThroughSExt, unsigned Depth) {
2151   const unsigned MaxDepth = 6;
2152 
2153   assert(V && "No Value?");
2154   assert(Depth <= MaxDepth && "Limit Search Depth");
2155   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2156 
2157   Type *T = V->getType();
2158 
2159   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2160 
2161   if (Base == 0)
2162     return false;
2163 
2164   if (Base == 1) {
2165     Multiple = V;
2166     return true;
2167   }
2168 
2169   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2170   Constant *BaseVal = ConstantInt::get(T, Base);
2171   if (CO && CO == BaseVal) {
2172     // Multiple is 1.
2173     Multiple = ConstantInt::get(T, 1);
2174     return true;
2175   }
2176 
2177   if (CI && CI->getZExtValue() % Base == 0) {
2178     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2179     return true;
2180   }
2181 
2182   if (Depth == MaxDepth) return false;  // Limit search depth.
2183 
2184   Operator *I = dyn_cast<Operator>(V);
2185   if (!I) return false;
2186 
2187   switch (I->getOpcode()) {
2188   default: break;
2189   case Instruction::SExt:
2190     if (!LookThroughSExt) return false;
2191     // otherwise fall through to ZExt
2192   case Instruction::ZExt:
2193     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2194                            LookThroughSExt, Depth+1);
2195   case Instruction::Shl:
2196   case Instruction::Mul: {
2197     Value *Op0 = I->getOperand(0);
2198     Value *Op1 = I->getOperand(1);
2199 
2200     if (I->getOpcode() == Instruction::Shl) {
2201       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2202       if (!Op1CI) return false;
2203       // Turn Op0 << Op1 into Op0 * 2^Op1
2204       APInt Op1Int = Op1CI->getValue();
2205       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2206       APInt API(Op1Int.getBitWidth(), 0);
2207       API.setBit(BitToSet);
2208       Op1 = ConstantInt::get(V->getContext(), API);
2209     }
2210 
2211     Value *Mul0 = nullptr;
2212     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2213       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2214         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2215           if (Op1C->getType()->getPrimitiveSizeInBits() <
2216               MulC->getType()->getPrimitiveSizeInBits())
2217             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2218           if (Op1C->getType()->getPrimitiveSizeInBits() >
2219               MulC->getType()->getPrimitiveSizeInBits())
2220             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2221 
2222           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2223           Multiple = ConstantExpr::getMul(MulC, Op1C);
2224           return true;
2225         }
2226 
2227       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2228         if (Mul0CI->getValue() == 1) {
2229           // V == Base * Op1, so return Op1
2230           Multiple = Op1;
2231           return true;
2232         }
2233     }
2234 
2235     Value *Mul1 = nullptr;
2236     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2237       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2238         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2239           if (Op0C->getType()->getPrimitiveSizeInBits() <
2240               MulC->getType()->getPrimitiveSizeInBits())
2241             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2242           if (Op0C->getType()->getPrimitiveSizeInBits() >
2243               MulC->getType()->getPrimitiveSizeInBits())
2244             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2245 
2246           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2247           Multiple = ConstantExpr::getMul(MulC, Op0C);
2248           return true;
2249         }
2250 
2251       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2252         if (Mul1CI->getValue() == 1) {
2253           // V == Base * Op0, so return Op0
2254           Multiple = Op0;
2255           return true;
2256         }
2257     }
2258   }
2259   }
2260 
2261   // We could not determine if V is a multiple of Base.
2262   return false;
2263 }
2264 
2265 /// Return true if we can prove that the specified FP value is never equal to
2266 /// -0.0.
2267 ///
2268 /// NOTE: this function will need to be revisited when we support non-default
2269 /// rounding modes!
2270 ///
2271 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2272                                 unsigned Depth) {
2273   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2274     return !CFP->getValueAPF().isNegZero();
2275 
2276   // FIXME: Magic number! At the least, this should be given a name because it's
2277   // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
2278   // expose it as a parameter, so it can be used for testing / experimenting.
2279   if (Depth == 6)
2280     return false;  // Limit search depth.
2281 
2282   const Operator *I = dyn_cast<Operator>(V);
2283   if (!I) return false;
2284 
2285   // Check if the nsz fast-math flag is set
2286   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2287     if (FPO->hasNoSignedZeros())
2288       return true;
2289 
2290   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2291   if (I->getOpcode() == Instruction::FAdd)
2292     if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2293       if (CFP->isNullValue())
2294         return true;
2295 
2296   // sitofp and uitofp turn into +0.0 for zero.
2297   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2298     return true;
2299 
2300   if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2301     Intrinsic::ID IID = getIntrinsicIDForCall(CI, TLI);
2302     switch (IID) {
2303     default:
2304       break;
2305     // sqrt(-0.0) = -0.0, no other negative results are possible.
2306     case Intrinsic::sqrt:
2307       return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2308     // fabs(x) != -0.0
2309     case Intrinsic::fabs:
2310       return true;
2311     }
2312   }
2313 
2314   return false;
2315 }
2316 
2317 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2318                                        const TargetLibraryInfo *TLI,
2319                                        unsigned Depth) {
2320   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2321     return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
2322 
2323   // FIXME: Magic number! At the least, this should be given a name because it's
2324   // used similarly in CannotBeNegativeZero(). A better fix may be to
2325   // expose it as a parameter, so it can be used for testing / experimenting.
2326   if (Depth == 6)
2327     return false;  // Limit search depth.
2328 
2329   const Operator *I = dyn_cast<Operator>(V);
2330   if (!I) return false;
2331 
2332   switch (I->getOpcode()) {
2333   default: break;
2334   // Unsigned integers are always nonnegative.
2335   case Instruction::UIToFP:
2336     return true;
2337   case Instruction::FMul:
2338     // x*x is always non-negative or a NaN.
2339     if (I->getOperand(0) == I->getOperand(1))
2340       return true;
2341     // Fall through
2342   case Instruction::FAdd:
2343   case Instruction::FDiv:
2344   case Instruction::FRem:
2345     return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2346            CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2347   case Instruction::Select:
2348     return CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1) &&
2349            CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2350   case Instruction::FPExt:
2351   case Instruction::FPTrunc:
2352     // Widening/narrowing never change sign.
2353     return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2354   case Instruction::Call:
2355     Intrinsic::ID IID = getIntrinsicIDForCall(cast<CallInst>(I), TLI);
2356     switch (IID) {
2357     default:
2358       break;
2359     case Intrinsic::maxnum:
2360       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) ||
2361              CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2362     case Intrinsic::minnum:
2363       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2364              CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2365     case Intrinsic::exp:
2366     case Intrinsic::exp2:
2367     case Intrinsic::fabs:
2368     case Intrinsic::sqrt:
2369       return true;
2370     case Intrinsic::powi:
2371       if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2372         // powi(x,n) is non-negative if n is even.
2373         if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2374           return true;
2375       }
2376       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2377     case Intrinsic::fma:
2378     case Intrinsic::fmuladd:
2379       // x*x+y is non-negative if y is non-negative.
2380       return I->getOperand(0) == I->getOperand(1) &&
2381              CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2382     }
2383     break;
2384   }
2385   return false;
2386 }
2387 
2388 /// If the specified value can be set by repeating the same byte in memory,
2389 /// return the i8 value that it is represented with.  This is
2390 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2391 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2392 /// byte store (e.g. i16 0x1234), return null.
2393 Value *llvm::isBytewiseValue(Value *V) {
2394   // All byte-wide stores are splatable, even of arbitrary variables.
2395   if (V->getType()->isIntegerTy(8)) return V;
2396 
2397   // Handle 'null' ConstantArrayZero etc.
2398   if (Constant *C = dyn_cast<Constant>(V))
2399     if (C->isNullValue())
2400       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2401 
2402   // Constant float and double values can be handled as integer values if the
2403   // corresponding integer value is "byteable".  An important case is 0.0.
2404   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2405     if (CFP->getType()->isFloatTy())
2406       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2407     if (CFP->getType()->isDoubleTy())
2408       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2409     // Don't handle long double formats, which have strange constraints.
2410   }
2411 
2412   // We can handle constant integers that are multiple of 8 bits.
2413   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2414     if (CI->getBitWidth() % 8 == 0) {
2415       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2416 
2417       if (!CI->getValue().isSplat(8))
2418         return nullptr;
2419       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2420     }
2421   }
2422 
2423   // A ConstantDataArray/Vector is splatable if all its members are equal and
2424   // also splatable.
2425   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2426     Value *Elt = CA->getElementAsConstant(0);
2427     Value *Val = isBytewiseValue(Elt);
2428     if (!Val)
2429       return nullptr;
2430 
2431     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2432       if (CA->getElementAsConstant(I) != Elt)
2433         return nullptr;
2434 
2435     return Val;
2436   }
2437 
2438   // Conceptually, we could handle things like:
2439   //   %a = zext i8 %X to i16
2440   //   %b = shl i16 %a, 8
2441   //   %c = or i16 %a, %b
2442   // but until there is an example that actually needs this, it doesn't seem
2443   // worth worrying about.
2444   return nullptr;
2445 }
2446 
2447 
2448 // This is the recursive version of BuildSubAggregate. It takes a few different
2449 // arguments. Idxs is the index within the nested struct From that we are
2450 // looking at now (which is of type IndexedType). IdxSkip is the number of
2451 // indices from Idxs that should be left out when inserting into the resulting
2452 // struct. To is the result struct built so far, new insertvalue instructions
2453 // build on that.
2454 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2455                                 SmallVectorImpl<unsigned> &Idxs,
2456                                 unsigned IdxSkip,
2457                                 Instruction *InsertBefore) {
2458   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2459   if (STy) {
2460     // Save the original To argument so we can modify it
2461     Value *OrigTo = To;
2462     // General case, the type indexed by Idxs is a struct
2463     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2464       // Process each struct element recursively
2465       Idxs.push_back(i);
2466       Value *PrevTo = To;
2467       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2468                              InsertBefore);
2469       Idxs.pop_back();
2470       if (!To) {
2471         // Couldn't find any inserted value for this index? Cleanup
2472         while (PrevTo != OrigTo) {
2473           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2474           PrevTo = Del->getAggregateOperand();
2475           Del->eraseFromParent();
2476         }
2477         // Stop processing elements
2478         break;
2479       }
2480     }
2481     // If we successfully found a value for each of our subaggregates
2482     if (To)
2483       return To;
2484   }
2485   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2486   // the struct's elements had a value that was inserted directly. In the latter
2487   // case, perhaps we can't determine each of the subelements individually, but
2488   // we might be able to find the complete struct somewhere.
2489 
2490   // Find the value that is at that particular spot
2491   Value *V = FindInsertedValue(From, Idxs);
2492 
2493   if (!V)
2494     return nullptr;
2495 
2496   // Insert the value in the new (sub) aggregrate
2497   return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2498                                        "tmp", InsertBefore);
2499 }
2500 
2501 // This helper takes a nested struct and extracts a part of it (which is again a
2502 // struct) into a new value. For example, given the struct:
2503 // { a, { b, { c, d }, e } }
2504 // and the indices "1, 1" this returns
2505 // { c, d }.
2506 //
2507 // It does this by inserting an insertvalue for each element in the resulting
2508 // struct, as opposed to just inserting a single struct. This will only work if
2509 // each of the elements of the substruct are known (ie, inserted into From by an
2510 // insertvalue instruction somewhere).
2511 //
2512 // All inserted insertvalue instructions are inserted before InsertBefore
2513 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2514                                 Instruction *InsertBefore) {
2515   assert(InsertBefore && "Must have someplace to insert!");
2516   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2517                                                              idx_range);
2518   Value *To = UndefValue::get(IndexedType);
2519   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2520   unsigned IdxSkip = Idxs.size();
2521 
2522   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2523 }
2524 
2525 /// Given an aggregrate and an sequence of indices, see if
2526 /// the scalar value indexed is already around as a register, for example if it
2527 /// were inserted directly into the aggregrate.
2528 ///
2529 /// If InsertBefore is not null, this function will duplicate (modified)
2530 /// insertvalues when a part of a nested struct is extracted.
2531 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2532                                Instruction *InsertBefore) {
2533   // Nothing to index? Just return V then (this is useful at the end of our
2534   // recursion).
2535   if (idx_range.empty())
2536     return V;
2537   // We have indices, so V should have an indexable type.
2538   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2539          "Not looking at a struct or array?");
2540   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2541          "Invalid indices for type?");
2542 
2543   if (Constant *C = dyn_cast<Constant>(V)) {
2544     C = C->getAggregateElement(idx_range[0]);
2545     if (!C) return nullptr;
2546     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2547   }
2548 
2549   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2550     // Loop the indices for the insertvalue instruction in parallel with the
2551     // requested indices
2552     const unsigned *req_idx = idx_range.begin();
2553     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2554          i != e; ++i, ++req_idx) {
2555       if (req_idx == idx_range.end()) {
2556         // We can't handle this without inserting insertvalues
2557         if (!InsertBefore)
2558           return nullptr;
2559 
2560         // The requested index identifies a part of a nested aggregate. Handle
2561         // this specially. For example,
2562         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2563         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2564         // %C = extractvalue {i32, { i32, i32 } } %B, 1
2565         // This can be changed into
2566         // %A = insertvalue {i32, i32 } undef, i32 10, 0
2567         // %C = insertvalue {i32, i32 } %A, i32 11, 1
2568         // which allows the unused 0,0 element from the nested struct to be
2569         // removed.
2570         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2571                                  InsertBefore);
2572       }
2573 
2574       // This insert value inserts something else than what we are looking for.
2575       // See if the (aggregate) value inserted into has the value we are
2576       // looking for, then.
2577       if (*req_idx != *i)
2578         return FindInsertedValue(I->getAggregateOperand(), idx_range,
2579                                  InsertBefore);
2580     }
2581     // If we end up here, the indices of the insertvalue match with those
2582     // requested (though possibly only partially). Now we recursively look at
2583     // the inserted value, passing any remaining indices.
2584     return FindInsertedValue(I->getInsertedValueOperand(),
2585                              makeArrayRef(req_idx, idx_range.end()),
2586                              InsertBefore);
2587   }
2588 
2589   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2590     // If we're extracting a value from an aggregate that was extracted from
2591     // something else, we can extract from that something else directly instead.
2592     // However, we will need to chain I's indices with the requested indices.
2593 
2594     // Calculate the number of indices required
2595     unsigned size = I->getNumIndices() + idx_range.size();
2596     // Allocate some space to put the new indices in
2597     SmallVector<unsigned, 5> Idxs;
2598     Idxs.reserve(size);
2599     // Add indices from the extract value instruction
2600     Idxs.append(I->idx_begin(), I->idx_end());
2601 
2602     // Add requested indices
2603     Idxs.append(idx_range.begin(), idx_range.end());
2604 
2605     assert(Idxs.size() == size
2606            && "Number of indices added not correct?");
2607 
2608     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2609   }
2610   // Otherwise, we don't know (such as, extracting from a function return value
2611   // or load instruction)
2612   return nullptr;
2613 }
2614 
2615 /// Analyze the specified pointer to see if it can be expressed as a base
2616 /// pointer plus a constant offset. Return the base and offset to the caller.
2617 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2618                                               const DataLayout &DL) {
2619   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2620   APInt ByteOffset(BitWidth, 0);
2621 
2622   // We walk up the defs but use a visited set to handle unreachable code. In
2623   // that case, we stop after accumulating the cycle once (not that it
2624   // matters).
2625   SmallPtrSet<Value *, 16> Visited;
2626   while (Visited.insert(Ptr).second) {
2627     if (Ptr->getType()->isVectorTy())
2628       break;
2629 
2630     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2631       APInt GEPOffset(BitWidth, 0);
2632       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2633         break;
2634 
2635       ByteOffset += GEPOffset;
2636 
2637       Ptr = GEP->getPointerOperand();
2638     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2639                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2640       Ptr = cast<Operator>(Ptr)->getOperand(0);
2641     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2642       if (GA->isInterposable())
2643         break;
2644       Ptr = GA->getAliasee();
2645     } else {
2646       break;
2647     }
2648   }
2649   Offset = ByteOffset.getSExtValue();
2650   return Ptr;
2651 }
2652 
2653 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
2654   // Make sure the GEP has exactly three arguments.
2655   if (GEP->getNumOperands() != 3)
2656     return false;
2657 
2658   // Make sure the index-ee is a pointer to array of i8.
2659   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
2660   if (!AT || !AT->getElementType()->isIntegerTy(8))
2661     return false;
2662 
2663   // Check to make sure that the first operand of the GEP is an integer and
2664   // has value 0 so that we are sure we're indexing into the initializer.
2665   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2666   if (!FirstIdx || !FirstIdx->isZero())
2667     return false;
2668 
2669   return true;
2670 }
2671 
2672 /// This function computes the length of a null-terminated C string pointed to
2673 /// by V. If successful, it returns true and returns the string in Str.
2674 /// If unsuccessful, it returns false.
2675 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2676                                  uint64_t Offset, bool TrimAtNul) {
2677   assert(V);
2678 
2679   // Look through bitcast instructions and geps.
2680   V = V->stripPointerCasts();
2681 
2682   // If the value is a GEP instruction or constant expression, treat it as an
2683   // offset.
2684   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2685     // The GEP operator should be based on a pointer to string constant, and is
2686     // indexing into the string constant.
2687     if (!isGEPBasedOnPointerToString(GEP))
2688       return false;
2689 
2690     // If the second index isn't a ConstantInt, then this is a variable index
2691     // into the array.  If this occurs, we can't say anything meaningful about
2692     // the string.
2693     uint64_t StartIdx = 0;
2694     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
2695       StartIdx = CI->getZExtValue();
2696     else
2697       return false;
2698     return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
2699                                  TrimAtNul);
2700   }
2701 
2702   // The GEP instruction, constant or instruction, must reference a global
2703   // variable that is a constant and is initialized. The referenced constant
2704   // initializer is the array that we'll use for optimization.
2705   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2706   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
2707     return false;
2708 
2709   // Handle the all-zeros case
2710   if (GV->getInitializer()->isNullValue()) {
2711     // This is a degenerate case. The initializer is constant zero so the
2712     // length of the string must be zero.
2713     Str = "";
2714     return true;
2715   }
2716 
2717   // Must be a Constant Array
2718   const ConstantDataArray *Array =
2719     dyn_cast<ConstantDataArray>(GV->getInitializer());
2720   if (!Array || !Array->isString())
2721     return false;
2722 
2723   // Get the number of elements in the array
2724   uint64_t NumElts = Array->getType()->getArrayNumElements();
2725 
2726   // Start out with the entire array in the StringRef.
2727   Str = Array->getAsString();
2728 
2729   if (Offset > NumElts)
2730     return false;
2731 
2732   // Skip over 'offset' bytes.
2733   Str = Str.substr(Offset);
2734 
2735   if (TrimAtNul) {
2736     // Trim off the \0 and anything after it.  If the array is not nul
2737     // terminated, we just return the whole end of string.  The client may know
2738     // some other way that the string is length-bound.
2739     Str = Str.substr(0, Str.find('\0'));
2740   }
2741   return true;
2742 }
2743 
2744 // These next two are very similar to the above, but also look through PHI
2745 // nodes.
2746 // TODO: See if we can integrate these two together.
2747 
2748 /// If we can compute the length of the string pointed to by
2749 /// the specified pointer, return 'len+1'.  If we can't, return 0.
2750 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
2751   // Look through noop bitcast instructions.
2752   V = V->stripPointerCasts();
2753 
2754   // If this is a PHI node, there are two cases: either we have already seen it
2755   // or we haven't.
2756   if (PHINode *PN = dyn_cast<PHINode>(V)) {
2757     if (!PHIs.insert(PN).second)
2758       return ~0ULL;  // already in the set.
2759 
2760     // If it was new, see if all the input strings are the same length.
2761     uint64_t LenSoFar = ~0ULL;
2762     for (Value *IncValue : PN->incoming_values()) {
2763       uint64_t Len = GetStringLengthH(IncValue, PHIs);
2764       if (Len == 0) return 0; // Unknown length -> unknown.
2765 
2766       if (Len == ~0ULL) continue;
2767 
2768       if (Len != LenSoFar && LenSoFar != ~0ULL)
2769         return 0;    // Disagree -> unknown.
2770       LenSoFar = Len;
2771     }
2772 
2773     // Success, all agree.
2774     return LenSoFar;
2775   }
2776 
2777   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
2778   if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
2779     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
2780     if (Len1 == 0) return 0;
2781     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
2782     if (Len2 == 0) return 0;
2783     if (Len1 == ~0ULL) return Len2;
2784     if (Len2 == ~0ULL) return Len1;
2785     if (Len1 != Len2) return 0;
2786     return Len1;
2787   }
2788 
2789   // Otherwise, see if we can read the string.
2790   StringRef StrData;
2791   if (!getConstantStringInfo(V, StrData))
2792     return 0;
2793 
2794   return StrData.size()+1;
2795 }
2796 
2797 /// If we can compute the length of the string pointed to by
2798 /// the specified pointer, return 'len+1'.  If we can't, return 0.
2799 uint64_t llvm::GetStringLength(Value *V) {
2800   if (!V->getType()->isPointerTy()) return 0;
2801 
2802   SmallPtrSet<PHINode*, 32> PHIs;
2803   uint64_t Len = GetStringLengthH(V, PHIs);
2804   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
2805   // an empty string as a length.
2806   return Len == ~0ULL ? 1 : Len;
2807 }
2808 
2809 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
2810 /// previous iteration of the loop was referring to the same object as \p PN.
2811 static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) {
2812   // Find the loop-defined value.
2813   Loop *L = LI->getLoopFor(PN->getParent());
2814   if (PN->getNumIncomingValues() != 2)
2815     return true;
2816 
2817   // Find the value from previous iteration.
2818   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
2819   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
2820     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
2821   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
2822     return true;
2823 
2824   // If a new pointer is loaded in the loop, the pointer references a different
2825   // object in every iteration.  E.g.:
2826   //    for (i)
2827   //       int *p = a[i];
2828   //       ...
2829   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
2830     if (!L->isLoopInvariant(Load->getPointerOperand()))
2831       return false;
2832   return true;
2833 }
2834 
2835 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
2836                                  unsigned MaxLookup) {
2837   if (!V->getType()->isPointerTy())
2838     return V;
2839   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
2840     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2841       V = GEP->getPointerOperand();
2842     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
2843                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
2844       V = cast<Operator>(V)->getOperand(0);
2845     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2846       if (GA->isInterposable())
2847         return V;
2848       V = GA->getAliasee();
2849     } else {
2850       // See if InstructionSimplify knows any relevant tricks.
2851       if (Instruction *I = dyn_cast<Instruction>(V))
2852         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
2853         if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
2854           V = Simplified;
2855           continue;
2856         }
2857 
2858       return V;
2859     }
2860     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
2861   }
2862   return V;
2863 }
2864 
2865 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
2866                                 const DataLayout &DL, LoopInfo *LI,
2867                                 unsigned MaxLookup) {
2868   SmallPtrSet<Value *, 4> Visited;
2869   SmallVector<Value *, 4> Worklist;
2870   Worklist.push_back(V);
2871   do {
2872     Value *P = Worklist.pop_back_val();
2873     P = GetUnderlyingObject(P, DL, MaxLookup);
2874 
2875     if (!Visited.insert(P).second)
2876       continue;
2877 
2878     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
2879       Worklist.push_back(SI->getTrueValue());
2880       Worklist.push_back(SI->getFalseValue());
2881       continue;
2882     }
2883 
2884     if (PHINode *PN = dyn_cast<PHINode>(P)) {
2885       // If this PHI changes the underlying object in every iteration of the
2886       // loop, don't look through it.  Consider:
2887       //   int **A;
2888       //   for (i) {
2889       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
2890       //     Curr = A[i];
2891       //     *Prev, *Curr;
2892       //
2893       // Prev is tracking Curr one iteration behind so they refer to different
2894       // underlying objects.
2895       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
2896           isSameUnderlyingObjectInLoop(PN, LI))
2897         for (Value *IncValue : PN->incoming_values())
2898           Worklist.push_back(IncValue);
2899       continue;
2900     }
2901 
2902     Objects.push_back(P);
2903   } while (!Worklist.empty());
2904 }
2905 
2906 /// Return true if the only users of this pointer are lifetime markers.
2907 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
2908   for (const User *U : V->users()) {
2909     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2910     if (!II) return false;
2911 
2912     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
2913         II->getIntrinsicID() != Intrinsic::lifetime_end)
2914       return false;
2915   }
2916   return true;
2917 }
2918 
2919 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
2920                                         const Instruction *CtxI,
2921                                         const DominatorTree *DT,
2922                                         const TargetLibraryInfo *TLI) {
2923   const Operator *Inst = dyn_cast<Operator>(V);
2924   if (!Inst)
2925     return false;
2926 
2927   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
2928     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
2929       if (C->canTrap())
2930         return false;
2931 
2932   switch (Inst->getOpcode()) {
2933   default:
2934     return true;
2935   case Instruction::UDiv:
2936   case Instruction::URem: {
2937     // x / y is undefined if y == 0.
2938     const APInt *V;
2939     if (match(Inst->getOperand(1), m_APInt(V)))
2940       return *V != 0;
2941     return false;
2942   }
2943   case Instruction::SDiv:
2944   case Instruction::SRem: {
2945     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
2946     const APInt *Numerator, *Denominator;
2947     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
2948       return false;
2949     // We cannot hoist this division if the denominator is 0.
2950     if (*Denominator == 0)
2951       return false;
2952     // It's safe to hoist if the denominator is not 0 or -1.
2953     if (*Denominator != -1)
2954       return true;
2955     // At this point we know that the denominator is -1.  It is safe to hoist as
2956     // long we know that the numerator is not INT_MIN.
2957     if (match(Inst->getOperand(0), m_APInt(Numerator)))
2958       return !Numerator->isMinSignedValue();
2959     // The numerator *might* be MinSignedValue.
2960     return false;
2961   }
2962   case Instruction::Load: {
2963     const LoadInst *LI = cast<LoadInst>(Inst);
2964     if (!LI->isUnordered() ||
2965         // Speculative load may create a race that did not exist in the source.
2966         LI->getParent()->getParent()->hasFnAttribute(
2967             Attribute::SanitizeThread) ||
2968         // Speculative load may load data from dirty regions.
2969         LI->getParent()->getParent()->hasFnAttribute(
2970             Attribute::SanitizeAddress))
2971       return false;
2972     const DataLayout &DL = LI->getModule()->getDataLayout();
2973     return isDereferenceableAndAlignedPointer(
2974         LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
2975   }
2976   case Instruction::Call: {
2977     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
2978       switch (II->getIntrinsicID()) {
2979       // These synthetic intrinsics have no side-effects and just mark
2980       // information about their operands.
2981       // FIXME: There are other no-op synthetic instructions that potentially
2982       // should be considered at least *safe* to speculate...
2983       case Intrinsic::dbg_declare:
2984       case Intrinsic::dbg_value:
2985         return true;
2986 
2987       case Intrinsic::bswap:
2988       case Intrinsic::ctlz:
2989       case Intrinsic::ctpop:
2990       case Intrinsic::cttz:
2991       case Intrinsic::objectsize:
2992       case Intrinsic::sadd_with_overflow:
2993       case Intrinsic::smul_with_overflow:
2994       case Intrinsic::ssub_with_overflow:
2995       case Intrinsic::uadd_with_overflow:
2996       case Intrinsic::umul_with_overflow:
2997       case Intrinsic::usub_with_overflow:
2998         return true;
2999       // These intrinsics are defined to have the same behavior as libm
3000       // functions except for setting errno.
3001       case Intrinsic::sqrt:
3002       case Intrinsic::fma:
3003       case Intrinsic::fmuladd:
3004         return true;
3005       // These intrinsics are defined to have the same behavior as libm
3006       // functions, and the corresponding libm functions never set errno.
3007       case Intrinsic::trunc:
3008       case Intrinsic::copysign:
3009       case Intrinsic::fabs:
3010       case Intrinsic::minnum:
3011       case Intrinsic::maxnum:
3012         return true;
3013       // These intrinsics are defined to have the same behavior as libm
3014       // functions, which never overflow when operating on the IEEE754 types
3015       // that we support, and never set errno otherwise.
3016       case Intrinsic::ceil:
3017       case Intrinsic::floor:
3018       case Intrinsic::nearbyint:
3019       case Intrinsic::rint:
3020       case Intrinsic::round:
3021         return true;
3022       // TODO: are convert_{from,to}_fp16 safe?
3023       // TODO: can we list target-specific intrinsics here?
3024       default: break;
3025       }
3026     }
3027     return false; // The called function could have undefined behavior or
3028                   // side-effects, even if marked readnone nounwind.
3029   }
3030   case Instruction::VAArg:
3031   case Instruction::Alloca:
3032   case Instruction::Invoke:
3033   case Instruction::PHI:
3034   case Instruction::Store:
3035   case Instruction::Ret:
3036   case Instruction::Br:
3037   case Instruction::IndirectBr:
3038   case Instruction::Switch:
3039   case Instruction::Unreachable:
3040   case Instruction::Fence:
3041   case Instruction::AtomicRMW:
3042   case Instruction::AtomicCmpXchg:
3043   case Instruction::LandingPad:
3044   case Instruction::Resume:
3045   case Instruction::CatchSwitch:
3046   case Instruction::CatchPad:
3047   case Instruction::CatchRet:
3048   case Instruction::CleanupPad:
3049   case Instruction::CleanupRet:
3050     return false; // Misc instructions which have effects
3051   }
3052 }
3053 
3054 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3055   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3056 }
3057 
3058 /// Return true if we know that the specified value is never null.
3059 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
3060   assert(V->getType()->isPointerTy() && "V must be pointer type");
3061 
3062   // Alloca never returns null, malloc might.
3063   if (isa<AllocaInst>(V)) return true;
3064 
3065   // A byval, inalloca, or nonnull argument is never null.
3066   if (const Argument *A = dyn_cast<Argument>(V))
3067     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3068 
3069   // A global variable in address space 0 is non null unless extern weak.
3070   // Other address spaces may have null as a valid address for a global,
3071   // so we can't assume anything.
3072   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3073     return !GV->hasExternalWeakLinkage() &&
3074            GV->getType()->getAddressSpace() == 0;
3075 
3076   // A Load tagged w/nonnull metadata is never null.
3077   if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3078     return LI->getMetadata(LLVMContext::MD_nonnull);
3079 
3080   if (auto CS = ImmutableCallSite(V))
3081     if (CS.isReturnNonNull())
3082       return true;
3083 
3084   return false;
3085 }
3086 
3087 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3088                                                   const Instruction *CtxI,
3089                                                   const DominatorTree *DT) {
3090   assert(V->getType()->isPointerTy() && "V must be pointer type");
3091 
3092   unsigned NumUsesExplored = 0;
3093   for (auto U : V->users()) {
3094     // Avoid massive lists
3095     if (NumUsesExplored >= DomConditionsMaxUses)
3096       break;
3097     NumUsesExplored++;
3098     // Consider only compare instructions uniquely controlling a branch
3099     const ICmpInst *Cmp = dyn_cast<ICmpInst>(U);
3100     if (!Cmp)
3101       continue;
3102 
3103     for (auto *CmpU : Cmp->users()) {
3104       const BranchInst *BI = dyn_cast<BranchInst>(CmpU);
3105       if (!BI)
3106         continue;
3107 
3108       assert(BI->isConditional() && "uses a comparison!");
3109 
3110       BasicBlock *NonNullSuccessor = nullptr;
3111       CmpInst::Predicate Pred;
3112 
3113       if (match(const_cast<ICmpInst*>(Cmp),
3114                 m_c_ICmp(Pred, m_Specific(V), m_Zero()))) {
3115         if (Pred == ICmpInst::ICMP_EQ)
3116           NonNullSuccessor = BI->getSuccessor(1);
3117         else if (Pred == ICmpInst::ICMP_NE)
3118           NonNullSuccessor = BI->getSuccessor(0);
3119       }
3120 
3121       if (NonNullSuccessor) {
3122         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3123         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3124           return true;
3125       }
3126     }
3127   }
3128 
3129   return false;
3130 }
3131 
3132 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3133                    const DominatorTree *DT, const TargetLibraryInfo *TLI) {
3134   if (isKnownNonNull(V, TLI))
3135     return true;
3136 
3137   return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
3138 }
3139 
3140 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
3141                                                    const DataLayout &DL,
3142                                                    AssumptionCache *AC,
3143                                                    const Instruction *CxtI,
3144                                                    const DominatorTree *DT) {
3145   // Multiplying n * m significant bits yields a result of n + m significant
3146   // bits. If the total number of significant bits does not exceed the
3147   // result bit width (minus 1), there is no overflow.
3148   // This means if we have enough leading zero bits in the operands
3149   // we can guarantee that the result does not overflow.
3150   // Ref: "Hacker's Delight" by Henry Warren
3151   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3152   APInt LHSKnownZero(BitWidth, 0);
3153   APInt LHSKnownOne(BitWidth, 0);
3154   APInt RHSKnownZero(BitWidth, 0);
3155   APInt RHSKnownOne(BitWidth, 0);
3156   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3157                    DT);
3158   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3159                    DT);
3160   // Note that underestimating the number of zero bits gives a more
3161   // conservative answer.
3162   unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3163                       RHSKnownZero.countLeadingOnes();
3164   // First handle the easy case: if we have enough zero bits there's
3165   // definitely no overflow.
3166   if (ZeroBits >= BitWidth)
3167     return OverflowResult::NeverOverflows;
3168 
3169   // Get the largest possible values for each operand.
3170   APInt LHSMax = ~LHSKnownZero;
3171   APInt RHSMax = ~RHSKnownZero;
3172 
3173   // We know the multiply operation doesn't overflow if the maximum values for
3174   // each operand will not overflow after we multiply them together.
3175   bool MaxOverflow;
3176   LHSMax.umul_ov(RHSMax, MaxOverflow);
3177   if (!MaxOverflow)
3178     return OverflowResult::NeverOverflows;
3179 
3180   // We know it always overflows if multiplying the smallest possible values for
3181   // the operands also results in overflow.
3182   bool MinOverflow;
3183   LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3184   if (MinOverflow)
3185     return OverflowResult::AlwaysOverflows;
3186 
3187   return OverflowResult::MayOverflow;
3188 }
3189 
3190 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3191                                                    const DataLayout &DL,
3192                                                    AssumptionCache *AC,
3193                                                    const Instruction *CxtI,
3194                                                    const DominatorTree *DT) {
3195   bool LHSKnownNonNegative, LHSKnownNegative;
3196   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3197                  AC, CxtI, DT);
3198   if (LHSKnownNonNegative || LHSKnownNegative) {
3199     bool RHSKnownNonNegative, RHSKnownNegative;
3200     ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3201                    AC, CxtI, DT);
3202 
3203     if (LHSKnownNegative && RHSKnownNegative) {
3204       // The sign bit is set in both cases: this MUST overflow.
3205       // Create a simple add instruction, and insert it into the struct.
3206       return OverflowResult::AlwaysOverflows;
3207     }
3208 
3209     if (LHSKnownNonNegative && RHSKnownNonNegative) {
3210       // The sign bit is clear in both cases: this CANNOT overflow.
3211       // Create a simple add instruction, and insert it into the struct.
3212       return OverflowResult::NeverOverflows;
3213     }
3214   }
3215 
3216   return OverflowResult::MayOverflow;
3217 }
3218 
3219 static OverflowResult computeOverflowForSignedAdd(
3220     Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
3221     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
3222   if (Add && Add->hasNoSignedWrap()) {
3223     return OverflowResult::NeverOverflows;
3224   }
3225 
3226   bool LHSKnownNonNegative, LHSKnownNegative;
3227   bool RHSKnownNonNegative, RHSKnownNegative;
3228   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3229                  AC, CxtI, DT);
3230   ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3231                  AC, CxtI, DT);
3232 
3233   if ((LHSKnownNonNegative && RHSKnownNegative) ||
3234       (LHSKnownNegative && RHSKnownNonNegative)) {
3235     // The sign bits are opposite: this CANNOT overflow.
3236     return OverflowResult::NeverOverflows;
3237   }
3238 
3239   // The remaining code needs Add to be available. Early returns if not so.
3240   if (!Add)
3241     return OverflowResult::MayOverflow;
3242 
3243   // If the sign of Add is the same as at least one of the operands, this add
3244   // CANNOT overflow. This is particularly useful when the sum is
3245   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3246   // operands.
3247   bool LHSOrRHSKnownNonNegative =
3248       (LHSKnownNonNegative || RHSKnownNonNegative);
3249   bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3250   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3251     bool AddKnownNonNegative, AddKnownNegative;
3252     ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3253                    /*Depth=*/0, AC, CxtI, DT);
3254     if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3255         (AddKnownNegative && LHSOrRHSKnownNegative)) {
3256       return OverflowResult::NeverOverflows;
3257     }
3258   }
3259 
3260   return OverflowResult::MayOverflow;
3261 }
3262 
3263 OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
3264                                                  const DataLayout &DL,
3265                                                  AssumptionCache *AC,
3266                                                  const Instruction *CxtI,
3267                                                  const DominatorTree *DT) {
3268   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3269                                        Add, DL, AC, CxtI, DT);
3270 }
3271 
3272 OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
3273                                                  const DataLayout &DL,
3274                                                  AssumptionCache *AC,
3275                                                  const Instruction *CxtI,
3276                                                  const DominatorTree *DT) {
3277   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3278 }
3279 
3280 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3281   // FIXME: This conservative implementation can be relaxed. E.g. most
3282   // atomic operations are guaranteed to terminate on most platforms
3283   // and most functions terminate.
3284 
3285   return !I->isAtomic() &&       // atomics may never succeed on some platforms
3286          !isa<CallInst>(I) &&    // could throw and might not terminate
3287          !isa<InvokeInst>(I) &&  // might not terminate and could throw to
3288                                  //   non-successor (see bug 24185 for details).
3289          !isa<ResumeInst>(I) &&  // has no successors
3290          !isa<ReturnInst>(I);    // has no successors
3291 }
3292 
3293 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3294                                                   const Loop *L) {
3295   // The loop header is guaranteed to be executed for every iteration.
3296   //
3297   // FIXME: Relax this constraint to cover all basic blocks that are
3298   // guaranteed to be executed at every iteration.
3299   if (I->getParent() != L->getHeader()) return false;
3300 
3301   for (const Instruction &LI : *L->getHeader()) {
3302     if (&LI == I) return true;
3303     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3304   }
3305   llvm_unreachable("Instruction not contained in its own parent basic block.");
3306 }
3307 
3308 bool llvm::propagatesFullPoison(const Instruction *I) {
3309   switch (I->getOpcode()) {
3310     case Instruction::Add:
3311     case Instruction::Sub:
3312     case Instruction::Xor:
3313     case Instruction::Trunc:
3314     case Instruction::BitCast:
3315     case Instruction::AddrSpaceCast:
3316       // These operations all propagate poison unconditionally. Note that poison
3317       // is not any particular value, so xor or subtraction of poison with
3318       // itself still yields poison, not zero.
3319       return true;
3320 
3321     case Instruction::AShr:
3322     case Instruction::SExt:
3323       // For these operations, one bit of the input is replicated across
3324       // multiple output bits. A replicated poison bit is still poison.
3325       return true;
3326 
3327     case Instruction::Shl: {
3328       // Left shift *by* a poison value is poison. The number of
3329       // positions to shift is unsigned, so no negative values are
3330       // possible there. Left shift by zero places preserves poison. So
3331       // it only remains to consider left shift of poison by a positive
3332       // number of places.
3333       //
3334       // A left shift by a positive number of places leaves the lowest order bit
3335       // non-poisoned. However, if such a shift has a no-wrap flag, then we can
3336       // make the poison operand violate that flag, yielding a fresh full-poison
3337       // value.
3338       auto *OBO = cast<OverflowingBinaryOperator>(I);
3339       return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
3340     }
3341 
3342     case Instruction::Mul: {
3343       // A multiplication by zero yields a non-poison zero result, so we need to
3344       // rule out zero as an operand. Conservatively, multiplication by a
3345       // non-zero constant is not multiplication by zero.
3346       //
3347       // Multiplication by a non-zero constant can leave some bits
3348       // non-poisoned. For example, a multiplication by 2 leaves the lowest
3349       // order bit unpoisoned. So we need to consider that.
3350       //
3351       // Multiplication by 1 preserves poison. If the multiplication has a
3352       // no-wrap flag, then we can make the poison operand violate that flag
3353       // when multiplied by any integer other than 0 and 1.
3354       auto *OBO = cast<OverflowingBinaryOperator>(I);
3355       if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
3356         for (Value *V : OBO->operands()) {
3357           if (auto *CI = dyn_cast<ConstantInt>(V)) {
3358             // A ConstantInt cannot yield poison, so we can assume that it is
3359             // the other operand that is poison.
3360             return !CI->isZero();
3361           }
3362         }
3363       }
3364       return false;
3365     }
3366 
3367     case Instruction::GetElementPtr:
3368       // A GEP implicitly represents a sequence of additions, subtractions,
3369       // truncations, sign extensions and multiplications. The multiplications
3370       // are by the non-zero sizes of some set of types, so we do not have to be
3371       // concerned with multiplication by zero. If the GEP is in-bounds, then
3372       // these operations are implicitly no-signed-wrap so poison is propagated
3373       // by the arguments above for Add, Sub, Trunc, SExt and Mul.
3374       return cast<GEPOperator>(I)->isInBounds();
3375 
3376     default:
3377       return false;
3378   }
3379 }
3380 
3381 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3382   switch (I->getOpcode()) {
3383     case Instruction::Store:
3384       return cast<StoreInst>(I)->getPointerOperand();
3385 
3386     case Instruction::Load:
3387       return cast<LoadInst>(I)->getPointerOperand();
3388 
3389     case Instruction::AtomicCmpXchg:
3390       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3391 
3392     case Instruction::AtomicRMW:
3393       return cast<AtomicRMWInst>(I)->getPointerOperand();
3394 
3395     case Instruction::UDiv:
3396     case Instruction::SDiv:
3397     case Instruction::URem:
3398     case Instruction::SRem:
3399       return I->getOperand(1);
3400 
3401     default:
3402       return nullptr;
3403   }
3404 }
3405 
3406 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
3407   // We currently only look for uses of poison values within the same basic
3408   // block, as that makes it easier to guarantee that the uses will be
3409   // executed given that PoisonI is executed.
3410   //
3411   // FIXME: Expand this to consider uses beyond the same basic block. To do
3412   // this, look out for the distinction between post-dominance and strong
3413   // post-dominance.
3414   const BasicBlock *BB = PoisonI->getParent();
3415 
3416   // Set of instructions that we have proved will yield poison if PoisonI
3417   // does.
3418   SmallSet<const Value *, 16> YieldsPoison;
3419   YieldsPoison.insert(PoisonI);
3420 
3421   for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end();
3422        I != E; ++I) {
3423     if (&*I != PoisonI) {
3424       const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I);
3425       if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
3426       if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
3427         return false;
3428     }
3429 
3430     // Mark poison that propagates from I through uses of I.
3431     if (YieldsPoison.count(&*I)) {
3432       for (const User *User : I->users()) {
3433         const Instruction *UserI = cast<Instruction>(User);
3434         if (UserI->getParent() == BB && propagatesFullPoison(UserI))
3435           YieldsPoison.insert(User);
3436       }
3437     }
3438   }
3439   return false;
3440 }
3441 
3442 static bool isKnownNonNaN(Value *V, FastMathFlags FMF) {
3443   if (FMF.noNaNs())
3444     return true;
3445 
3446   if (auto *C = dyn_cast<ConstantFP>(V))
3447     return !C->isNaN();
3448   return false;
3449 }
3450 
3451 static bool isKnownNonZero(Value *V) {
3452   if (auto *C = dyn_cast<ConstantFP>(V))
3453     return !C->isZero();
3454   return false;
3455 }
3456 
3457 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
3458                                               FastMathFlags FMF,
3459                                               Value *CmpLHS, Value *CmpRHS,
3460                                               Value *TrueVal, Value *FalseVal,
3461                                               Value *&LHS, Value *&RHS) {
3462   LHS = CmpLHS;
3463   RHS = CmpRHS;
3464 
3465   // If the predicate is an "or-equal"  (FP) predicate, then signed zeroes may
3466   // return inconsistent results between implementations.
3467   //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
3468   //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
3469   // Therefore we behave conservatively and only proceed if at least one of the
3470   // operands is known to not be zero, or if we don't care about signed zeroes.
3471   switch (Pred) {
3472   default: break;
3473   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
3474   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
3475     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
3476         !isKnownNonZero(CmpRHS))
3477       return {SPF_UNKNOWN, SPNB_NA, false};
3478   }
3479 
3480   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
3481   bool Ordered = false;
3482 
3483   // When given one NaN and one non-NaN input:
3484   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
3485   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
3486   //     ordered comparison fails), which could be NaN or non-NaN.
3487   // so here we discover exactly what NaN behavior is required/accepted.
3488   if (CmpInst::isFPPredicate(Pred)) {
3489     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
3490     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
3491 
3492     if (LHSSafe && RHSSafe) {
3493       // Both operands are known non-NaN.
3494       NaNBehavior = SPNB_RETURNS_ANY;
3495     } else if (CmpInst::isOrdered(Pred)) {
3496       // An ordered comparison will return false when given a NaN, so it
3497       // returns the RHS.
3498       Ordered = true;
3499       if (LHSSafe)
3500         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
3501         NaNBehavior = SPNB_RETURNS_NAN;
3502       else if (RHSSafe)
3503         NaNBehavior = SPNB_RETURNS_OTHER;
3504       else
3505         // Completely unsafe.
3506         return {SPF_UNKNOWN, SPNB_NA, false};
3507     } else {
3508       Ordered = false;
3509       // An unordered comparison will return true when given a NaN, so it
3510       // returns the LHS.
3511       if (LHSSafe)
3512         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
3513         NaNBehavior = SPNB_RETURNS_OTHER;
3514       else if (RHSSafe)
3515         NaNBehavior = SPNB_RETURNS_NAN;
3516       else
3517         // Completely unsafe.
3518         return {SPF_UNKNOWN, SPNB_NA, false};
3519     }
3520   }
3521 
3522   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
3523     std::swap(CmpLHS, CmpRHS);
3524     Pred = CmpInst::getSwappedPredicate(Pred);
3525     if (NaNBehavior == SPNB_RETURNS_NAN)
3526       NaNBehavior = SPNB_RETURNS_OTHER;
3527     else if (NaNBehavior == SPNB_RETURNS_OTHER)
3528       NaNBehavior = SPNB_RETURNS_NAN;
3529     Ordered = !Ordered;
3530   }
3531 
3532   // ([if]cmp X, Y) ? X : Y
3533   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
3534     switch (Pred) {
3535     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
3536     case ICmpInst::ICMP_UGT:
3537     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
3538     case ICmpInst::ICMP_SGT:
3539     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
3540     case ICmpInst::ICMP_ULT:
3541     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
3542     case ICmpInst::ICMP_SLT:
3543     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
3544     case FCmpInst::FCMP_UGT:
3545     case FCmpInst::FCMP_UGE:
3546     case FCmpInst::FCMP_OGT:
3547     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
3548     case FCmpInst::FCMP_ULT:
3549     case FCmpInst::FCMP_ULE:
3550     case FCmpInst::FCMP_OLT:
3551     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
3552     }
3553   }
3554 
3555   if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
3556     if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
3557         (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
3558 
3559       // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
3560       // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
3561       if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
3562         return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3563       }
3564 
3565       // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
3566       // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
3567       if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
3568         return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3569       }
3570     }
3571 
3572     // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C)
3573     if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) {
3574       if (Pred == ICmpInst::ICMP_SGT && C1->getType() == C2->getType() &&
3575           ~C1->getValue() == C2->getValue() &&
3576           (match(TrueVal, m_Not(m_Specific(CmpLHS))) ||
3577            match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
3578         LHS = TrueVal;
3579         RHS = FalseVal;
3580         return {SPF_SMIN, SPNB_NA, false};
3581       }
3582     }
3583   }
3584 
3585   // TODO: (X > 4) ? X : 5   -->  (X >= 5) ? X : 5  -->  MAX(X, 5)
3586 
3587   return {SPF_UNKNOWN, SPNB_NA, false};
3588 }
3589 
3590 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
3591                               Instruction::CastOps *CastOp) {
3592   CastInst *CI = dyn_cast<CastInst>(V1);
3593   Constant *C = dyn_cast<Constant>(V2);
3594   CastInst *CI2 = dyn_cast<CastInst>(V2);
3595   if (!CI)
3596     return nullptr;
3597   *CastOp = CI->getOpcode();
3598 
3599   if (CI2) {
3600     // If V1 and V2 are both the same cast from the same type, we can look
3601     // through V1.
3602     if (CI2->getOpcode() == CI->getOpcode() &&
3603         CI2->getSrcTy() == CI->getSrcTy())
3604       return CI2->getOperand(0);
3605     return nullptr;
3606   } else if (!C) {
3607     return nullptr;
3608   }
3609 
3610   if (isa<SExtInst>(CI) && CmpI->isSigned()) {
3611     Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy());
3612     // This is only valid if the truncated value can be sign-extended
3613     // back to the original value.
3614     if (ConstantExpr::getSExt(T, C->getType()) == C)
3615       return T;
3616     return nullptr;
3617   }
3618   if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
3619     return ConstantExpr::getTrunc(C, CI->getSrcTy());
3620 
3621   if (isa<TruncInst>(CI))
3622     return ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
3623 
3624   if (isa<FPToUIInst>(CI))
3625     return ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);
3626 
3627   if (isa<FPToSIInst>(CI))
3628     return ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);
3629 
3630   if (isa<UIToFPInst>(CI))
3631     return ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);
3632 
3633   if (isa<SIToFPInst>(CI))
3634     return ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);
3635 
3636   if (isa<FPTruncInst>(CI))
3637     return ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);
3638 
3639   if (isa<FPExtInst>(CI))
3640     return ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);
3641 
3642   return nullptr;
3643 }
3644 
3645 SelectPatternResult llvm::matchSelectPattern(Value *V,
3646                                              Value *&LHS, Value *&RHS,
3647                                              Instruction::CastOps *CastOp) {
3648   SelectInst *SI = dyn_cast<SelectInst>(V);
3649   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
3650 
3651   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
3652   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
3653 
3654   CmpInst::Predicate Pred = CmpI->getPredicate();
3655   Value *CmpLHS = CmpI->getOperand(0);
3656   Value *CmpRHS = CmpI->getOperand(1);
3657   Value *TrueVal = SI->getTrueValue();
3658   Value *FalseVal = SI->getFalseValue();
3659   FastMathFlags FMF;
3660   if (isa<FPMathOperator>(CmpI))
3661     FMF = CmpI->getFastMathFlags();
3662 
3663   // Bail out early.
3664   if (CmpI->isEquality())
3665     return {SPF_UNKNOWN, SPNB_NA, false};
3666 
3667   // Deal with type mismatches.
3668   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
3669     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
3670       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3671                                   cast<CastInst>(TrueVal)->getOperand(0), C,
3672                                   LHS, RHS);
3673     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
3674       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3675                                   C, cast<CastInst>(FalseVal)->getOperand(0),
3676                                   LHS, RHS);
3677   }
3678   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
3679                               LHS, RHS);
3680 }
3681 
3682 ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) {
3683   const unsigned NumRanges = Ranges.getNumOperands() / 2;
3684   assert(NumRanges >= 1 && "Must have at least one range!");
3685   assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
3686 
3687   auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0));
3688   auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1));
3689 
3690   ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue());
3691 
3692   for (unsigned i = 1; i < NumRanges; ++i) {
3693     auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
3694     auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
3695 
3696     // Note: unionWith will potentially create a range that contains values not
3697     // contained in any of the original N ranges.
3698     CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue()));
3699   }
3700 
3701   return CR;
3702 }
3703 
3704 /// Return true if "icmp Pred LHS RHS" is always true.
3705 static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
3706                             const DataLayout &DL, unsigned Depth,
3707                             AssumptionCache *AC, const Instruction *CxtI,
3708                             const DominatorTree *DT) {
3709   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
3710   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
3711     return true;
3712 
3713   switch (Pred) {
3714   default:
3715     return false;
3716 
3717   case CmpInst::ICMP_SLE: {
3718     const APInt *C;
3719 
3720     // LHS s<= LHS +_{nsw} C   if C >= 0
3721     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
3722       return !C->isNegative();
3723     return false;
3724   }
3725 
3726   case CmpInst::ICMP_ULE: {
3727     const APInt *C;
3728 
3729     // LHS u<= LHS +_{nuw} C   for any C
3730     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
3731       return true;
3732 
3733     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
3734     auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X,
3735                                        const APInt *&CA, const APInt *&CB) {
3736       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
3737           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
3738         return true;
3739 
3740       // If X & C == 0 then (X | C) == X +_{nuw} C
3741       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
3742           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
3743         unsigned BitWidth = CA->getBitWidth();
3744         APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3745         computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
3746 
3747         if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
3748           return true;
3749       }
3750 
3751       return false;
3752     };
3753 
3754     Value *X;
3755     const APInt *CLHS, *CRHS;
3756     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
3757       return CLHS->ule(*CRHS);
3758 
3759     return false;
3760   }
3761   }
3762 }
3763 
3764 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
3765 /// ALHS ARHS" is true.
3766 static bool isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS,
3767                                   Value *ARHS, Value *BLHS, Value *BRHS,
3768                                   const DataLayout &DL, unsigned Depth,
3769                                   AssumptionCache *AC, const Instruction *CxtI,
3770                                   const DominatorTree *DT) {
3771   switch (Pred) {
3772   default:
3773     return false;
3774 
3775   case CmpInst::ICMP_SLT:
3776   case CmpInst::ICMP_SLE:
3777     return isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
3778                            DT) &&
3779            isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI,
3780                            DT);
3781 
3782   case CmpInst::ICMP_ULT:
3783   case CmpInst::ICMP_ULE:
3784     return isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
3785                            DT) &&
3786            isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI,
3787                            DT);
3788   }
3789 }
3790 
3791 bool llvm::isImpliedCondition(Value *LHS, Value *RHS, const DataLayout &DL,
3792                               unsigned Depth, AssumptionCache *AC,
3793                               const Instruction *CxtI,
3794                               const DominatorTree *DT) {
3795   assert(LHS->getType() == RHS->getType() && "mismatched type");
3796   Type *OpTy = LHS->getType();
3797   assert(OpTy->getScalarType()->isIntegerTy(1));
3798 
3799   // LHS ==> RHS by definition
3800   if (LHS == RHS) return true;
3801 
3802   if (OpTy->isVectorTy())
3803     // TODO: extending the code below to handle vectors
3804     return false;
3805   assert(OpTy->isIntegerTy(1) && "implied by above");
3806 
3807   ICmpInst::Predicate APred, BPred;
3808   Value *ALHS, *ARHS;
3809   Value *BLHS, *BRHS;
3810 
3811   if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
3812       !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
3813     return false;
3814 
3815   if (APred == BPred)
3816     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
3817                                  CxtI, DT);
3818 
3819   return false;
3820 }
3821