1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/Loads.h"
21 #include "llvm/Analysis/LoopInfo.h"
22 #include "llvm/Analysis/MemoryBuiltins.h"
23 #include "llvm/IR/CallSite.h"
24 #include "llvm/IR/ConstantRange.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/GetElementPtrTypeIterator.h"
29 #include "llvm/IR/GlobalAlias.h"
30 #include "llvm/IR/GlobalVariable.h"
31 #include "llvm/IR/Instructions.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/IR/LLVMContext.h"
34 #include "llvm/IR/Metadata.h"
35 #include "llvm/IR/Operator.h"
36 #include "llvm/IR/PatternMatch.h"
37 #include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/MathExtras.h"
40 #include <algorithm>
41 #include <array>
42 #include <cstring>
43 using namespace llvm;
44 using namespace llvm::PatternMatch;
45 
46 const unsigned MaxDepth = 6;
47 
48 // Controls the number of uses of the value searched for possible
49 // dominating comparisons.
50 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
51                                               cl::Hidden, cl::init(20));
52 
53 /// Returns the bitwidth of the given scalar or pointer type, or 0 if the width
54 /// is unknown. For vector types, returns the element type's bitwidth.
55 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
56   if (unsigned BitWidth = Ty->getScalarSizeInBits())
57     return BitWidth;
58 
59   return DL.getPointerTypeSizeInBits(Ty);
60 }
61 
62 namespace {
63 // Simplifying using an assume can only be done in a particular control-flow
64 // context (the context instruction provides that context). If an assume and
65 // the context instruction are not in the same block then the DT helps in
66 // figuring out if we can use it.
67 struct Query {
68   const DataLayout &DL;
69   AssumptionCache *AC;
70   const Instruction *CxtI;
71   const DominatorTree *DT;
72 
73   /// Set of assumptions that should be excluded from further queries.
74   /// This is because of the potential for mutual recursion to cause
75   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
76   /// classic case of this is assume(x == y), which will attempt to determine
77   /// bits in x from bits in y, which will attempt to determine bits in y from
78   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
79   /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
80   /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
81   /// on.
82   std::array<const Value*, MaxDepth> Excluded;
83   unsigned NumExcluded;
84 
85   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
86         const DominatorTree *DT)
87       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}
88 
89   Query(const Query &Q, const Value *NewExcl)
90       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
91     Excluded = Q.Excluded;
92     Excluded[NumExcluded++] = NewExcl;
93     assert(NumExcluded <= Excluded.size());
94   }
95 
96   bool isExcluded(const Value *Value) const {
97     if (NumExcluded == 0)
98       return false;
99     auto End = Excluded.begin() + NumExcluded;
100     return std::find(Excluded.begin(), End, Value) != End;
101   }
102 };
103 } // end anonymous namespace
104 
105 // Given the provided Value and, potentially, a context instruction, return
106 // the preferred context instruction (if any).
107 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
108   // If we've been provided with a context instruction, then use that (provided
109   // it has been inserted).
110   if (CxtI && CxtI->getParent())
111     return CxtI;
112 
113   // If the value is really an already-inserted instruction, then use that.
114   CxtI = dyn_cast<Instruction>(V);
115   if (CxtI && CxtI->getParent())
116     return CxtI;
117 
118   return nullptr;
119 }
120 
121 static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
122                              unsigned Depth, const Query &Q);
123 
124 void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
125                             const DataLayout &DL, unsigned Depth,
126                             AssumptionCache *AC, const Instruction *CxtI,
127                             const DominatorTree *DT) {
128   ::computeKnownBits(V, KnownZero, KnownOne, Depth,
129                      Query(DL, AC, safeCxtI(V, CxtI), DT));
130 }
131 
132 bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
133                                AssumptionCache *AC, const Instruction *CxtI,
134                                const DominatorTree *DT) {
135   assert(LHS->getType() == RHS->getType() &&
136          "LHS and RHS should have the same type");
137   assert(LHS->getType()->isIntOrIntVectorTy() &&
138          "LHS and RHS should be integers");
139   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
140   APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
141   APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
142   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
143   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
144   return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
145 }
146 
147 static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
148                            unsigned Depth, const Query &Q);
149 
150 void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
151                           const DataLayout &DL, unsigned Depth,
152                           AssumptionCache *AC, const Instruction *CxtI,
153                           const DominatorTree *DT) {
154   ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
155                    Query(DL, AC, safeCxtI(V, CxtI), DT));
156 }
157 
158 static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
159                                    const Query &Q);
160 
161 bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
162                                   unsigned Depth, AssumptionCache *AC,
163                                   const Instruction *CxtI,
164                                   const DominatorTree *DT) {
165   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
166                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
167 }
168 
169 static bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q);
170 
171 bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
172                           AssumptionCache *AC, const Instruction *CxtI,
173                           const DominatorTree *DT) {
174   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
175 }
176 
177 bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
178                               AssumptionCache *AC, const Instruction *CxtI,
179                               const DominatorTree *DT) {
180   bool NonNegative, Negative;
181   ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
182   return NonNegative;
183 }
184 
185 static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q);
186 
187 bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
188                           AssumptionCache *AC, const Instruction *CxtI,
189                           const DominatorTree *DT) {
190   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
191                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
192                                          DT));
193 }
194 
195 static bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
196                               const Query &Q);
197 
198 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
199                              unsigned Depth, AssumptionCache *AC,
200                              const Instruction *CxtI, const DominatorTree *DT) {
201   return ::MaskedValueIsZero(V, Mask, Depth,
202                              Query(DL, AC, safeCxtI(V, CxtI), DT));
203 }
204 
205 static unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q);
206 
207 unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
208                                   unsigned Depth, AssumptionCache *AC,
209                                   const Instruction *CxtI,
210                                   const DominatorTree *DT) {
211   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
212 }
213 
214 static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
215                                    APInt &KnownZero, APInt &KnownOne,
216                                    APInt &KnownZero2, APInt &KnownOne2,
217                                    unsigned Depth, const Query &Q) {
218   if (!Add) {
219     if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
220       // We know that the top bits of C-X are clear if X contains fewer bits
221       // than C (i.e. no wrap-around can happen).  For example, 20-X is
222       // positive if we can prove that X is >= 0 and < 16.
223       if (!CLHS->getValue().isNegative()) {
224         unsigned BitWidth = KnownZero.getBitWidth();
225         unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
226         // NLZ can't be BitWidth here: C is non-negative, so C+1 is non-zero.
227         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
228         computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
229 
230         // If all of the MaskV bits are known to be zero, then we know the
231         // output top bits are zero, because we now know that the output is
232         // from [0-C].
233         if ((KnownZero2 & MaskV) == MaskV) {
234           unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
235           // Top bits known zero.
236           KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
237         }
238       }
239     }
240   }
241 
242   unsigned BitWidth = KnownZero.getBitWidth();
243 
244   // If an initial sequence of bits in the result is not needed, the
245   // corresponding bits in the operands are not needed.
246   APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
247   computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
248   computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
249 
250   // Carry in a 1 for a subtract, rather than a 0.
251   APInt CarryIn(BitWidth, 0);
252   if (!Add) {
253     // Sum = LHS + ~RHS + 1
254     std::swap(KnownZero2, KnownOne2);
255     CarryIn.setBit(0);
256   }
257 
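  // ~LHSKnownZero and ~KnownZero2 are the largest values each operand could
  // take, while LHSKnownOne and KnownOne2 are the smallest, so PossibleSumZero
  // and PossibleSumOne are the maximum and minimum possible sums. A carry bit
  // that is clear even in the maximum case is known zero, a carry that is set
  // even in the minimum case is known one, and a result bit is known once both
  // operand bits and the incoming carry are known.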
258   APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
259   APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
260 
261   // Compute known bits of the carry.
262   APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
263   APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;
264 
265   // Compute set of known bits (where all three relevant bits are known).
266   APInt LHSKnown = LHSKnownZero | LHSKnownOne;
267   APInt RHSKnown = KnownZero2 | KnownOne2;
268   APInt CarryKnown = CarryKnownZero | CarryKnownOne;
269   APInt Known = LHSKnown & RHSKnown & CarryKnown;
270 
271   assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
272          "known bits of sum differ");
273 
274   // Compute known bits of the result.
275   KnownZero = ~PossibleSumOne & Known;
276   KnownOne = PossibleSumOne & Known;
277 
278   // Are we still trying to solve for the sign bit?
279   if (!Known.isNegative()) {
280     if (NSW) {
281       // Adding two non-negative numbers, or subtracting a negative number from
282       // a non-negative one, can't wrap into negative.
283       if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
284         KnownZero |= APInt::getSignBit(BitWidth);
285       // Adding two negative numbers, or subtracting a non-negative number from
286       // a negative one, can't wrap into non-negative.
287       else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
288         KnownOne |= APInt::getSignBit(BitWidth);
289     }
290   }
291 }
292 
293 static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
294                                 APInt &KnownZero, APInt &KnownOne,
295                                 APInt &KnownZero2, APInt &KnownOne2,
296                                 unsigned Depth, const Query &Q) {
297   unsigned BitWidth = KnownZero.getBitWidth();
298   computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
299   computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);
300 
301   bool isKnownNegative = false;
302   bool isKnownNonNegative = false;
303   // If the multiplication is known not to overflow, compute the sign bit.
304   if (NSW) {
305     if (Op0 == Op1) {
306       // The product of a number with itself is non-negative.
307       isKnownNonNegative = true;
308     } else {
309       bool isKnownNonNegativeOp1 = KnownZero.isNegative();
310       bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
311       bool isKnownNegativeOp1 = KnownOne.isNegative();
312       bool isKnownNegativeOp0 = KnownOne2.isNegative();
313       // The product of two numbers with the same sign is non-negative.
314       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
315         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
316       // The product of a negative number and a non-negative number is either
317       // negative or zero.
318       if (!isKnownNonNegative)
319         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
320                            isKnownNonZero(Op0, Depth, Q)) ||
321                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
322                            isKnownNonZero(Op1, Depth, Q));
323     }
324   }
325 
326   // If low bits are zero in either operand, output low known-0 bits.
327   // Also compute a conservative estimate for high known-0 bits.
328   // More trickiness is possible, but this is sufficient for the
329   // interesting case of alignment computation.
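  // For example, if Op0 has three trailing zero bits and Op1 has two, their
  // product is a multiple of 32 and therefore has at least five trailing zero
  // bits.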
330   KnownOne.clearAllBits();
331   unsigned TrailZ = KnownZero.countTrailingOnes() +
332                     KnownZero2.countTrailingOnes();
333   unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
334                              KnownZero2.countLeadingOnes(),
335                              BitWidth) - BitWidth;
336 
337   TrailZ = std::min(TrailZ, BitWidth);
338   LeadZ = std::min(LeadZ, BitWidth);
339   KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
340               APInt::getHighBitsSet(BitWidth, LeadZ);
341 
342   // Only make use of no-wrap flags if we failed to compute the sign bit
343   // directly.  This matters if the multiplication always overflows, in
344   // which case we prefer to follow the result of the direct computation,
345   // though as the program is invoking undefined behaviour we can choose
346   // whatever we like here.
347   if (isKnownNonNegative && !KnownOne.isNegative())
348     KnownZero.setBit(BitWidth - 1);
349   else if (isKnownNegative && !KnownZero.isNegative())
350     KnownOne.setBit(BitWidth - 1);
351 }
352 
353 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
354                                              APInt &KnownZero,
355                                              APInt &KnownOne) {
356   unsigned BitWidth = KnownZero.getBitWidth();
357   unsigned NumRanges = Ranges.getNumOperands() / 2;
358   assert(NumRanges >= 1);
359 
360   KnownZero.setAllBits();
361   KnownOne.setAllBits();
362 
363   for (unsigned i = 0; i < NumRanges; ++i) {
364     ConstantInt *Lower =
365         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
366     ConstantInt *Upper =
367         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
368     ConstantRange Range(Lower->getValue(), Upper->getValue());
369 
370     // The first CommonPrefixBits of all values in Range are equal.
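    // For example, for the half-open range [8, 12) the values 8..11 differ
    // only in their low two bits, so all higher bits are common to the range.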
371     unsigned CommonPrefixBits =
372         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
373 
374     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
375     KnownOne &= Range.getUnsignedMax() & Mask;
376     KnownZero &= ~Range.getUnsignedMax() & Mask;
377   }
378 }
379 
380 static bool isEphemeralValueOf(Instruction *I, const Value *E) {
381   SmallVector<const Value *, 16> WorkSet(1, I);
382   SmallPtrSet<const Value *, 32> Visited;
383   SmallPtrSet<const Value *, 16> EphValues;
384 
385   // The instruction defining an assumption's condition itself is always
386   // considered ephemeral to that assumption (even if it has other
387   // non-ephemeral users). See r246696's test case for an example.
388   if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
389     return true;
390 
391   while (!WorkSet.empty()) {
392     const Value *V = WorkSet.pop_back_val();
393     if (!Visited.insert(V).second)
394       continue;
395 
396     // If all uses of this value are ephemeral, then so is this value.
397     if (std::all_of(V->user_begin(), V->user_end(),
398                     [&](const User *U) { return EphValues.count(U); })) {
399       if (V == E)
400         return true;
401 
402       EphValues.insert(V);
403       if (const User *U = dyn_cast<User>(V))
404         for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
405              J != JE; ++J) {
406           if (isSafeToSpeculativelyExecute(*J))
407             WorkSet.push_back(*J);
408         }
409     }
410   }
411 
412   return false;
413 }
414 
415 // Is this an intrinsic that cannot be speculated but also cannot trap?
416 static bool isAssumeLikeIntrinsic(const Instruction *I) {
417   if (const CallInst *CI = dyn_cast<CallInst>(I))
418     if (Function *F = CI->getCalledFunction())
419       switch (F->getIntrinsicID()) {
420       default: break;
421       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
422       case Intrinsic::assume:
423       case Intrinsic::dbg_declare:
424       case Intrinsic::dbg_value:
425       case Intrinsic::invariant_start:
426       case Intrinsic::invariant_end:
427       case Intrinsic::lifetime_start:
428       case Intrinsic::lifetime_end:
429       case Intrinsic::objectsize:
430       case Intrinsic::ptr_annotation:
431       case Intrinsic::var_annotation:
432         return true;
433       }
434 
435   return false;
436 }
437 
438 static bool isValidAssumeForContext(Value *V, const Instruction *CxtI,
439                                     const DominatorTree *DT) {
440   Instruction *Inv = cast<Instruction>(V);
441 
442   // There are two restrictions on the use of an assume:
443   //  1. The assume must dominate the context (or the control flow must
444   //     reach the assume whenever it reaches the context).
445   //  2. The context must not be in the assume's set of ephemeral values
446   //     (otherwise we will use the assume to prove that the condition
447   //     feeding the assume is trivially true, thus causing the removal of
448   //     the assume).
449 
450   if (DT) {
451     if (DT->dominates(Inv, CxtI)) {
452       return true;
453     } else if (Inv->getParent() == CxtI->getParent()) {
454       // The context comes first, but they're both in the same block. Make sure
455       // there is nothing in between that might interrupt the control flow.
456       for (BasicBlock::const_iterator I =
457              std::next(BasicBlock::const_iterator(CxtI)),
458                                       IE(Inv); I != IE; ++I)
459         if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
460           return false;
461 
462       return !isEphemeralValueOf(Inv, CxtI);
463     }
464 
465     return false;
466   }
467 
468   // When we don't have a DT, we do a limited search...
469   if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
470     return true;
471   } else if (Inv->getParent() == CxtI->getParent()) {
472     // Search forward from the assume until we reach the context (or the end
473     // of the block); the common case is that the assume will come first.
474     for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
475          IE = Inv->getParent()->end(); I != IE; ++I)
476       if (&*I == CxtI)
477         return true;
478 
479     // The context must come first...
480     for (BasicBlock::const_iterator I =
481            std::next(BasicBlock::const_iterator(CxtI)),
482                                     IE(Inv); I != IE; ++I)
483       if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
484         return false;
485 
486     return !isEphemeralValueOf(Inv, CxtI);
487   }
488 
489   return false;
490 }
491 
492 bool llvm::isValidAssumeForContext(const Instruction *I,
493                                    const Instruction *CxtI,
494                                    const DominatorTree *DT) {
495   return ::isValidAssumeForContext(const_cast<Instruction *>(I), CxtI, DT);
496 }
497 
498 template<typename LHS, typename RHS>
499 inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>,
500                         CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>>
501 m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
502   return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L));
503 }
504 
505 template<typename LHS, typename RHS>
506 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>,
507                         BinaryOp_match<RHS, LHS, Instruction::And>>
508 m_c_And(const LHS &L, const RHS &R) {
509   return m_CombineOr(m_And(L, R), m_And(R, L));
510 }
511 
512 template<typename LHS, typename RHS>
513 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>,
514                         BinaryOp_match<RHS, LHS, Instruction::Or>>
515 m_c_Or(const LHS &L, const RHS &R) {
516   return m_CombineOr(m_Or(L, R), m_Or(R, L));
517 }
518 
519 template<typename LHS, typename RHS>
520 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>,
521                         BinaryOp_match<RHS, LHS, Instruction::Xor>>
522 m_c_Xor(const LHS &L, const RHS &R) {
523   return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
524 }
525 
526 static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
527                                        APInt &KnownOne, unsigned Depth,
528                                        const Query &Q) {
529   // Use of assumptions is context-sensitive. If we don't have a context, we
530   // cannot use them!
531   if (!Q.AC || !Q.CxtI)
532     return;
533 
534   unsigned BitWidth = KnownZero.getBitWidth();
535 
536   for (auto &AssumeVH : Q.AC->assumptions()) {
537     if (!AssumeVH)
538       continue;
539     CallInst *I = cast<CallInst>(AssumeVH);
540     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
541            "Got assumption for the wrong function!");
542     if (Q.isExcluded(I))
543       continue;
544 
545     // Warning: This loop can end up being somewhat performance sensitive.
546     // We're running this loop once for each value queried, resulting in a
547     // runtime of ~O(#assumes * #values).
548 
549     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
550            "must be an assume intrinsic");
551 
552     Value *Arg = I->getArgOperand(0);
553 
554     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
555       assert(BitWidth == 1 && "assume operand is not i1?");
556       KnownZero.clearAllBits();
557       KnownOne.setAllBits();
558       return;
559     }
560 
561     // The remaining tests are all recursive, so bail out if we hit the limit.
562     if (Depth == MaxDepth)
563       continue;
564 
565     Value *A, *B;
566     auto m_V = m_CombineOr(m_Specific(V),
567                            m_CombineOr(m_PtrToInt(m_Specific(V)),
568                            m_BitCast(m_Specific(V))));
569 
570     CmpInst::Predicate Pred;
571     ConstantInt *C;
572     // assume(v = a)
573     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
574         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
575       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
576       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
577       KnownZero |= RHSKnownZero;
578       KnownOne  |= RHSKnownOne;
579     // assume(v & b = a)
580     } else if (match(Arg,
581                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
582                Pred == ICmpInst::ICMP_EQ &&
583                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
584       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
585       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
586       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
587       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
588 
589       // For those bits in the mask that are known to be one, we can propagate
590       // known bits from the RHS to V.
591       KnownZero |= RHSKnownZero & MaskKnownOne;
592       KnownOne  |= RHSKnownOne  & MaskKnownOne;
593     // assume(~(v & b) = a)
594     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
595                                    m_Value(A))) &&
596                Pred == ICmpInst::ICMP_EQ &&
597                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
598       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
599       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
600       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
601       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
602 
603       // For those bits in the mask that are known to be one, we can propagate
604       // inverted known bits from the RHS to V.
605       KnownZero |= RHSKnownOne  & MaskKnownOne;
606       KnownOne  |= RHSKnownZero & MaskKnownOne;
607     // assume(v | b = a)
608     } else if (match(Arg,
609                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
610                Pred == ICmpInst::ICMP_EQ &&
611                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
612       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
613       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
614       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
615       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
616 
617       // For those bits in B that are known to be zero, we can propagate known
618       // bits from the RHS to V.
619       KnownZero |= RHSKnownZero & BKnownZero;
620       KnownOne  |= RHSKnownOne  & BKnownZero;
621     // assume(~(v | b) = a)
622     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
623                                    m_Value(A))) &&
624                Pred == ICmpInst::ICMP_EQ &&
625                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
626       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
627       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
628       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
629       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
630 
631       // For those bits in B that are known to be zero, we can propagate
632       // inverted known bits from the RHS to V.
633       KnownZero |= RHSKnownOne  & BKnownZero;
634       KnownOne  |= RHSKnownZero & BKnownZero;
635     // assume(v ^ b = a)
636     } else if (match(Arg,
637                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
638                Pred == ICmpInst::ICMP_EQ &&
639                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
640       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
641       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
642       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
643       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
644 
645       // For those bits in B that are known to be zero, we can propagate known
646       // bits from the RHS to V. For those bits in B that are known to be one,
647       // we can propagate inverted known bits from the RHS to V.
648       KnownZero |= RHSKnownZero & BKnownZero;
649       KnownOne  |= RHSKnownOne  & BKnownZero;
650       KnownZero |= RHSKnownOne  & BKnownOne;
651       KnownOne  |= RHSKnownZero & BKnownOne;
652     // assume(~(v ^ b) = a)
653     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
654                                    m_Value(A))) &&
655                Pred == ICmpInst::ICMP_EQ &&
656                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
657       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
658       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
659       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
660       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
661 
662       // For those bits in B that are known to be zero, we can propagate
663       // inverted known bits from the RHS to V. For those bits in B that are
664       // known to be one, we can propagate known bits from the RHS to V.
665       KnownZero |= RHSKnownOne  & BKnownZero;
666       KnownOne  |= RHSKnownZero & BKnownZero;
667       KnownZero |= RHSKnownZero & BKnownOne;
668       KnownOne  |= RHSKnownOne  & BKnownOne;
669     // assume(v << c = a)
670     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
671                                    m_Value(A))) &&
672                Pred == ICmpInst::ICMP_EQ &&
673                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
674       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
675       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
676       // For those bits in RHS that are known, we can propagate them to known
677       // bits in V shifted to the right by C.
678       KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
679       KnownOne  |= RHSKnownOne.lshr(C->getZExtValue());
680     // assume(~(v << c) = a)
681     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
682                                    m_Value(A))) &&
683                Pred == ICmpInst::ICMP_EQ &&
684                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
685       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
686       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
687       // For those bits in RHS that are known, we can propagate them inverted
688       // to known bits in V shifted to the right by C.
689       KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
690       KnownOne  |= RHSKnownZero.lshr(C->getZExtValue());
691     // assume(v >> c = a)
692     } else if (match(Arg,
693                      m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
694                                                 m_AShr(m_V, m_ConstantInt(C))),
695                               m_Value(A))) &&
696                Pred == ICmpInst::ICMP_EQ &&
697                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
698       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
699       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
700       // For those bits in RHS that are known, we can propagate them to known
701       // bits in V shifted to the right by C.
702       KnownZero |= RHSKnownZero << C->getZExtValue();
703       KnownOne  |= RHSKnownOne  << C->getZExtValue();
704     // assume(~(v >> c) = a)
705     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
706                                              m_LShr(m_V, m_ConstantInt(C)),
707                                              m_AShr(m_V, m_ConstantInt(C)))),
708                                    m_Value(A))) &&
709                Pred == ICmpInst::ICMP_EQ &&
710                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
711       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
712       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
713       // For those bits in RHS that are known, we can propagate them inverted
714       // to known bits in V shifted to the right by C.
715       KnownZero |= RHSKnownOne  << C->getZExtValue();
716       KnownOne  |= RHSKnownZero << C->getZExtValue();
717     // assume(v >=_s c) where c is non-negative
718     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
719                Pred == ICmpInst::ICMP_SGE &&
720                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
721       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
722       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
723 
724       if (RHSKnownZero.isNegative()) {
725         // We know that the sign bit is zero.
726         KnownZero |= APInt::getSignBit(BitWidth);
727       }
728     // assume(v >_s c) where c is at least -1.
729     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
730                Pred == ICmpInst::ICMP_SGT &&
731                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
732       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
733       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
734 
735       if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
736         // We know that the sign bit is zero.
737         KnownZero |= APInt::getSignBit(BitWidth);
738       }
739     // assume(v <=_s c) where c is negative
740     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
741                Pred == ICmpInst::ICMP_SLE &&
742                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
743       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
744       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
745 
746       if (RHSKnownOne.isNegative()) {
747         // We know that the sign bit is one.
748         KnownOne |= APInt::getSignBit(BitWidth);
749       }
750     // assume(v <_s c) where c is non-positive
751     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
752                Pred == ICmpInst::ICMP_SLT &&
753                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
754       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
755       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
756 
757       if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
758         // We know that the sign bit is one.
759         KnownOne |= APInt::getSignBit(BitWidth);
760       }
761     // assume(v <=_u c)
762     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
763                Pred == ICmpInst::ICMP_ULE &&
764                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
765       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
766       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
767 
768       // Whatever high bits in c are zero are known to be zero.
769       KnownZero |=
770         APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
771     // assume(v <_u c)
772     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
773                Pred == ICmpInst::ICMP_ULT &&
774                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
775       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
776       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
777 
778       // Whatever high bits in c are zero are known to be zero (if c is a power
779       // of 2, then one more).
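      // For example, assume(v <_u 16) implies v <= 15, so for a 32-bit v the
      // top 28 bits are known to be zero.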
780       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
781         KnownZero |=
782           APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
783       else
784         KnownZero |=
785           APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
786     }
787   }
788 }
789 
790 // Compute known bits from a shift operator, including those with a
791 // non-constant shift amount. KnownZero and KnownOne are the outputs of this
792 // function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
793 // same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
794 // functors that, given the known-zero or known-one bits respectively, and a
795 // shift amount, compute the implied known-zero or known-one bits of the shift
796 // operator's result respectively for that shift amount. The results from calling
797 // KZF and KOF are conservatively combined for all permitted shift amounts.
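// For example, for shl the known-zero functor shifts the incoming known-zero
// bits left and additionally marks the vacated low bits as known zero, while
// the known-one functor simply shifts the known-one bits left.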
798 template <typename KZFunctor, typename KOFunctor>
799 static void computeKnownBitsFromShiftOperator(Operator *I,
800               APInt &KnownZero, APInt &KnownOne,
801               APInt &KnownZero2, APInt &KnownOne2,
802               unsigned Depth, const Query &Q, KZFunctor KZF, KOFunctor KOF) {
803   unsigned BitWidth = KnownZero.getBitWidth();
804 
805   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
806     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
807 
808     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
809     KnownZero = KZF(KnownZero, ShiftAmt);
810     KnownOne  = KOF(KnownOne, ShiftAmt);
811     return;
812   }
813 
814   computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
815 
816   // Note: We cannot use KnownZero.getLimitedValue() here, because if
817   // BitWidth > 64 and any upper bits are known, we'll end up returning the
818   // limit value (which implies all bits are known).
819   uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
820   uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();
821 
822   // It would be more clearly correct to use the two temporaries for this
823   // calculation. We reuse the APInts here to prevent unnecessary allocations.
824   KnownZero.clearAllBits();
825   KnownOne.clearAllBits();
826 
827   // If we know the shifter operand is nonzero, we can sometimes infer more
828   // known bits. However this is expensive to compute, so be lazy about it and
829   // only compute it when absolutely necessary.
830   Optional<bool> ShifterOperandIsNonZero;
831 
832   // Early exit if we can't constrain any well-defined shift amount.
833   if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
834     ShifterOperandIsNonZero =
835         isKnownNonZero(I->getOperand(1), Depth + 1, Q);
836     if (!*ShifterOperandIsNonZero)
837       return;
838   }
839 
840   computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
841 
842   KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
843   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
844     // Combine the shifted known input bits only for those shift amounts
845     // compatible with its known constraints.
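    // A shift amount is compatible if it has no bit set where the shift
    // operand is known to be zero and has a set bit wherever the operand is
    // known to be one.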
846     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
847       continue;
848     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
849       continue;
850     // If we know the shifter is nonzero, we may be able to infer more known
851     // bits. This check is sunk down as far as possible to avoid the expensive
852     // call to isKnownNonZero if the cheaper checks above fail.
853     if (ShiftAmt == 0) {
854       if (!ShifterOperandIsNonZero.hasValue())
855         ShifterOperandIsNonZero =
856             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
857       if (*ShifterOperandIsNonZero)
858         continue;
859     }
860 
861     KnownZero &= KZF(KnownZero2, ShiftAmt);
862     KnownOne  &= KOF(KnownOne2, ShiftAmt);
863   }
864 
865   // If there are no compatible shift amounts, then we've proven that the shift
866   // amount must be >= the BitWidth, and the result is undefined. We could
867   // return anything we'd like, but we need to make sure the sets of known bits
868   // stay disjoint (it should be better for some other code to actually
869   // propagate the undef than to pick a value here using known bits).
870   if ((KnownZero & KnownOne) != 0) {
871     KnownZero.clearAllBits();
872     KnownOne.clearAllBits();
873   }
874 }
875 
876 static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
877                                          APInt &KnownOne, unsigned Depth,
878                                          const Query &Q) {
879   unsigned BitWidth = KnownZero.getBitWidth();
880 
881   APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
882   switch (I->getOpcode()) {
883   default: break;
884   case Instruction::Load:
885     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
886       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
887     break;
888   case Instruction::And: {
889     // If either the LHS or the RHS is zero, the result is zero.
890     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
891     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
892 
893     // Output known-1 bits are only known if set in both the LHS & RHS.
894     KnownOne &= KnownOne2;
895     // Output known-0 bits are known to be clear if zero in either LHS | RHS.
896     KnownZero |= KnownZero2;
897 
898     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
899     // here we handle the more general case of adding any odd number by
900     // matching the form and(x, add(x, y)) where y is odd.
901     // TODO: This could be generalized to clearing any bit set in y where the
902     // following bit is known to be unset in y.
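    // For example, the low bit of x & (x + 3) is always clear, because exactly
    // one of x and x + 3 is even.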
903     Value *Y = nullptr;
904     if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
905                                       m_Value(Y))) ||
906         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
907                                       m_Value(Y)))) {
908       APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
909       computeKnownBits(Y, KnownZero3, KnownOne3, Depth + 1, Q);
910       if (KnownOne3.countTrailingOnes() > 0)
911         KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
912     }
913     break;
914   }
915   case Instruction::Or: {
916     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
917     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
918 
919     // Output known-0 bits are only known if clear in both the LHS & RHS.
920     KnownZero &= KnownZero2;
921     // Output known-1 bits are known to be set if set in either the LHS | RHS.
922     KnownOne |= KnownOne2;
923     break;
924   }
925   case Instruction::Xor: {
926     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
927     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
928 
929     // Output known-0 bits are known if clear or set in both the LHS & RHS.
930     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
931     // Output known-1 bits are known to be set if set in only one of LHS, RHS.
932     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
933     KnownZero = KnownZeroOut;
934     break;
935   }
936   case Instruction::Mul: {
937     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
938     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
939                         KnownOne, KnownZero2, KnownOne2, Depth, Q);
940     break;
941   }
942   case Instruction::UDiv: {
943     // For the purposes of computing leading zeros we can conservatively
944     // treat a udiv as a logical right shift by the power of 2 known to
945     // be less than the denominator.
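    // For example, if the numerator is known to fit in 20 bits and the
    // denominator is known to be at least 16, the quotient is known to fit in
    // 16 bits.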
946     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
947     unsigned LeadZ = KnownZero2.countLeadingOnes();
948 
949     KnownOne2.clearAllBits();
950     KnownZero2.clearAllBits();
951     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
952     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
953     if (RHSUnknownLeadingOnes != BitWidth)
954       LeadZ = std::min(BitWidth,
955                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
956 
957     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
958     break;
959   }
960   case Instruction::Select:
961     computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
962     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
963 
964     // Only known if known in both the LHS and RHS.
965     KnownOne &= KnownOne2;
966     KnownZero &= KnownZero2;
967     break;
968   case Instruction::FPTrunc:
969   case Instruction::FPExt:
970   case Instruction::FPToUI:
971   case Instruction::FPToSI:
972   case Instruction::SIToFP:
973   case Instruction::UIToFP:
974     break; // Can't work with floating point.
975   case Instruction::PtrToInt:
976   case Instruction::IntToPtr:
977   case Instruction::AddrSpaceCast: // Pointers could be different sizes.
978     // FALL THROUGH and handle them the same as zext/trunc.
979   case Instruction::ZExt:
980   case Instruction::Trunc: {
981     Type *SrcTy = I->getOperand(0)->getType();
982 
983     unsigned SrcBitWidth;
984     // Note that we handle pointer operands here because of inttoptr/ptrtoint
985     // which fall through here.
986     SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
987 
988     assert(SrcBitWidth && "SrcBitWidth can't be zero");
989     KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
990     KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
991     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
992     KnownZero = KnownZero.zextOrTrunc(BitWidth);
993     KnownOne = KnownOne.zextOrTrunc(BitWidth);
994     // Any top bits are known to be zero.
995     if (BitWidth > SrcBitWidth)
996       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
997     break;
998   }
999   case Instruction::BitCast: {
1000     Type *SrcTy = I->getOperand(0)->getType();
1001     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy() ||
1002          SrcTy->isFloatingPointTy()) &&
1003         // TODO: For now, not handling conversions like:
1004         // (bitcast i64 %x to <2 x i32>)
1005         !I->getType()->isVectorTy()) {
1006       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1007       break;
1008     }
1009     break;
1010   }
1011   case Instruction::SExt: {
1012     // Compute the bits in the result that are not present in the input.
1013     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1014 
1015     KnownZero = KnownZero.trunc(SrcBitWidth);
1016     KnownOne = KnownOne.trunc(SrcBitWidth);
1017     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1018     KnownZero = KnownZero.zext(BitWidth);
1019     KnownOne = KnownOne.zext(BitWidth);
1020 
1021     // If the sign bit of the input is known set or clear, then we know the
1022     // top bits of the result.
1023     if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
1024       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1025     else if (KnownOne[SrcBitWidth-1])           // Input sign bit known set
1026       KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1027     break;
1028   }
1029   case Instruction::Shl: {
1030     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1031     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1032       return (KnownZero << ShiftAmt) |
1033              APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
1034     };
1035 
1036     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1037       return KnownOne << ShiftAmt;
1038     };
1039 
1040     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1041                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1042                                       KOF);
1043     break;
1044   }
1045   case Instruction::LShr: {
1046     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1047     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1048       return APIntOps::lshr(KnownZero, ShiftAmt) |
1049              // High bits known zero.
1050              APInt::getHighBitsSet(BitWidth, ShiftAmt);
1051     };
1052 
1053     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1054       return APIntOps::lshr(KnownOne, ShiftAmt);
1055     };
1056 
1057     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1058                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1059                                       KOF);
1060     break;
1061   }
1062   case Instruction::AShr: {
1063     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1064     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1065       return APIntOps::ashr(KnownZero, ShiftAmt);
1066     };
1067 
1068     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1069       return APIntOps::ashr(KnownOne, ShiftAmt);
1070     };
1071 
1072     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1073                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1074                                       KOF);
1075     break;
1076   }
1077   case Instruction::Sub: {
1078     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1079     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1080                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1081                            Q);
1082     break;
1083   }
1084   case Instruction::Add: {
1085     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1086     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1087                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1088                            Q);
1089     break;
1090   }
1091   case Instruction::SRem:
1092     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1093       APInt RA = Rem->getValue().abs();
1094       if (RA.isPowerOf2()) {
1095         APInt LowBits = RA - 1;
1096         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
1097                          Q);
1098 
1099         // The low bits of the first operand are unchanged by the srem.
1100         KnownZero = KnownZero2 & LowBits;
1101         KnownOne = KnownOne2 & LowBits;
1102 
1103         // If the first operand is non-negative or has all low bits zero, then
1104         // the upper bits are all zero.
1105         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
1106           KnownZero |= ~LowBits;
1107 
1108         // If the first operand is negative and not all low bits are zero, then
1109         // the upper bits are all one.
1110         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
1111           KnownOne |= ~LowBits;
1112 
1113         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1114       }
1115     }
1116 
1117     // The sign bit is the LHS's sign bit, except when the result of the
1118     // remainder is zero.
1119     if (KnownZero.isNonNegative()) {
1120       APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
1121       computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
1122                        Q);
1123       // If it's known zero, our sign bit is also zero.
1124       if (LHSKnownZero.isNegative())
1125         KnownZero.setBit(BitWidth - 1);
1126     }
1127 
1128     break;
1129   case Instruction::URem: {
1130     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1131       APInt RA = Rem->getValue();
1132       if (RA.isPowerOf2()) {
1133         APInt LowBits = (RA - 1);
1134         computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1135         KnownZero |= ~LowBits;
1136         KnownOne &= LowBits;
1137         break;
1138       }
1139     }
1140 
1141     // Since the result is less than or equal to either operand, any leading
1142     // zero bits in either operand must also exist in the result.
1143     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1144     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
1145 
1146     unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
1147                                 KnownZero2.countLeadingOnes());
1148     KnownOne.clearAllBits();
1149     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
1150     break;
1151   }
1152 
1153   case Instruction::Alloca: {
1154     AllocaInst *AI = cast<AllocaInst>(I);
1155     unsigned Align = AI->getAlignment();
1156     if (Align == 0)
1157       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1158 
1159     if (Align > 0)
1160       KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1161     break;
1162   }
1163   case Instruction::GetElementPtr: {
1164     // Analyze all of the subscripts of this getelementptr instruction
1165     // to determine if we can prove known low zero bits.
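    // For example, an index into an array of i32 adds an offset that is a
    // multiple of four, so that operand contributes at least two trailing
    // zero bits to the minimum computed below.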
1166     APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
1167     computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
1168                      Q);
1169     unsigned TrailZ = LocalKnownZero.countTrailingOnes();
1170 
1171     gep_type_iterator GTI = gep_type_begin(I);
1172     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1173       Value *Index = I->getOperand(i);
1174       if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1175         // Handle struct member offset arithmetic.
1176 
1177         // Handle the case when the index is a vector zeroinitializer.
1178         Constant *CIndex = cast<Constant>(Index);
1179         if (CIndex->isZeroValue())
1180           continue;
1181 
1182         if (CIndex->getType()->isVectorTy())
1183           Index = CIndex->getSplatValue();
1184 
1185         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1186         const StructLayout *SL = Q.DL.getStructLayout(STy);
1187         uint64_t Offset = SL->getElementOffset(Idx);
1188         TrailZ = std::min<unsigned>(TrailZ,
1189                                     countTrailingZeros(Offset));
1190       } else {
1191         // Handle array index arithmetic.
1192         Type *IndexedTy = GTI.getIndexedType();
1193         if (!IndexedTy->isSized()) {
1194           TrailZ = 0;
1195           break;
1196         }
1197         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1198         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1199         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
1200         computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
1201         TrailZ = std::min(TrailZ,
1202                           unsigned(countTrailingZeros(TypeSize) +
1203                                    LocalKnownZero.countTrailingOnes()));
1204       }
1205     }
1206 
1207     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
1208     break;
1209   }
1210   case Instruction::PHI: {
1211     PHINode *P = cast<PHINode>(I);
1212     // Handle the case of a simple two-predecessor recurrence PHI.
1213     // There's a lot more that could theoretically be done here, but
1214     // this is sufficient to catch some interesting cases.
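    // For example, for %i = phi [ 0, %entry ], [ %i.next, %loop ] with
    // %i.next = add i32 %i, 4, both the start value and the step have two
    // trailing zero bits, so the low two bits of %i are known to be zero.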
1215     if (P->getNumIncomingValues() == 2) {
1216       for (unsigned i = 0; i != 2; ++i) {
1217         Value *L = P->getIncomingValue(i);
1218         Value *R = P->getIncomingValue(!i);
1219         Operator *LU = dyn_cast<Operator>(L);
1220         if (!LU)
1221           continue;
1222         unsigned Opcode = LU->getOpcode();
1223         // Check for operations that have the property that if
1224         // both their operands have low zero bits, the result
1225         // will have low zero bits.
1226         if (Opcode == Instruction::Add ||
1227             Opcode == Instruction::Sub ||
1228             Opcode == Instruction::And ||
1229             Opcode == Instruction::Or ||
1230             Opcode == Instruction::Mul) {
1231           Value *LL = LU->getOperand(0);
1232           Value *LR = LU->getOperand(1);
1233           // Find a recurrence.
1234           if (LL == I)
1235             L = LR;
1236           else if (LR == I)
1237             L = LL;
1238           else
1239             break;
1240           // Ok, we have a PHI of the form L op= R. Check for low
1241           // zero bits.
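          // For example, for the common induction pattern
          //   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
          //   %iv.next = add i32 %iv, 4
          // both the start value (0) and the step (4) have their low two bits
          // clear, so the low two bits of %iv are known zero as well.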
1242           computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);
1243 
1244           // We need to take the minimum number of known bits
1245           APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
1246           computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);
1247 
1248           KnownZero = APInt::getLowBitsSet(BitWidth,
1249                                            std::min(KnownZero2.countTrailingOnes(),
1250                                                     KnownZero3.countTrailingOnes()));
1251           break;
1252         }
1253       }
1254     }
1255 
1256     // Unreachable blocks may have zero-operand PHI nodes.
1257     if (P->getNumIncomingValues() == 0)
1258       break;
1259 
    // Otherwise take the intersection of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
1262     if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value refers back to the PHI node itself.
1264       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1265         break;
1266 
1267       KnownZero = APInt::getAllOnesValue(BitWidth);
1268       KnownOne = APInt::getAllOnesValue(BitWidth);
1269       for (Value *IncValue : P->incoming_values()) {
1270         // Skip direct self references.
1271         if (IncValue == P) continue;
1272 
1273         KnownZero2 = APInt(BitWidth, 0);
1274         KnownOne2 = APInt(BitWidth, 0);
1275         // Recurse, but cap the recursion to one level, because we don't
1276         // want to waste time spinning around in loops.
1277         computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
1278         KnownZero &= KnownZero2;
1279         KnownOne &= KnownOne2;
1280         // If all bits have been ruled out, there's no need to check
1281         // more operands.
1282         if (!KnownZero && !KnownOne)
1283           break;
1284       }
1285     }
1286     break;
1287   }
1288   case Instruction::Call:
1289   case Instruction::Invoke:
1290     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1291       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    // If range metadata is attached to this IntrinsicInst, intersect the
1293     // explicit range specified by the metadata and the implicit range of
1294     // the intrinsic.
1295     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1296       switch (II->getIntrinsicID()) {
1297       default: break;
1298       case Intrinsic::bswap:
1299         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1300         KnownZero |= KnownZero2.byteSwap();
1301         KnownOne |= KnownOne2.byteSwap();
1302         break;
1303       case Intrinsic::ctlz:
1304       case Intrinsic::cttz: {
1305         unsigned LowBits = Log2_32(BitWidth)+1;
1306         // If this call is undefined for 0, the result will be less than 2^n.
1307         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1308           LowBits -= 1;
1309         KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1310         break;
1311       }
1312       case Intrinsic::ctpop: {
1313         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1314         // We can bound the space the count needs.  Also, bits known to be zero
1315         // can't contribute to the population.
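        // For example, an i32 argument with 20 bits known zero has at most 12
        // bits set, so its popcount fits in 4 bits and the upper 28 bits of
        // the result are known zero.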
1316         unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
1317         unsigned LeadingZeros =
1318           APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
1319         assert(LeadingZeros <= BitWidth);
1320         KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
1321         KnownOne &= ~KnownZero;
1322         // TODO: we could bound KnownOne using the lower bound on the number
1323         // of bits which might be set provided by popcnt KnownOne2.
1324         break;
1325       }
1326       case Intrinsic::fabs: {
1327         Type *Ty = II->getType();
1328         APInt SignBit = APInt::getSignBit(Ty->getScalarSizeInBits());
1329         KnownZero |= APInt::getSplat(Ty->getPrimitiveSizeInBits(), SignBit);
1330         break;
1331       }
1332       case Intrinsic::x86_sse42_crc32_64_64:
1333         KnownZero |= APInt::getHighBitsSet(64, 32);
1334         break;
1335       }
1336     }
1337     break;
1338   case Instruction::ExtractValue:
1339     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1340       ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1341       if (EVI->getNumIndices() != 1) break;
1342       if (EVI->getIndices()[0] == 0) {
1343         switch (II->getIntrinsicID()) {
1344         default: break;
1345         case Intrinsic::uadd_with_overflow:
1346         case Intrinsic::sadd_with_overflow:
1347           computeKnownBitsAddSub(true, II->getArgOperand(0),
1348                                  II->getArgOperand(1), false, KnownZero,
1349                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1350           break;
1351         case Intrinsic::usub_with_overflow:
1352         case Intrinsic::ssub_with_overflow:
1353           computeKnownBitsAddSub(false, II->getArgOperand(0),
1354                                  II->getArgOperand(1), false, KnownZero,
1355                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1356           break;
1357         case Intrinsic::umul_with_overflow:
1358         case Intrinsic::smul_with_overflow:
1359           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1360                               KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1361                               Q);
1362           break;
1363         }
1364       }
1365     }
1366   }
1367 }
1368 
1369 /// Determine which bits of V are known to be either zero or one and return
1370 /// them in the KnownZero/KnownOne bit sets.
1371 ///
1372 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1373 /// we cannot optimize based on the assumption that it is zero without changing
1374 /// it to be an explicit zero.  If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
1376 /// Because instcombine aggressively folds operations with undef args anyway,
1377 /// this won't lose us code quality.
1378 ///
1379 /// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and the
/// bit is set only if it is true
1383 /// for all of the elements in the vector.
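///
/// For example, for the constant vector <2 x i8> <i8 1, i8 3>, bit 0 is one in
/// every element and bits 2..7 are zero in every element, so KnownOne is 0x01
/// and KnownZero is 0xFC, while bit 1 differs between elements and stays
/// unknown.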
1384 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
1385                       unsigned Depth, const Query &Q) {
1386   assert(V && "No Value?");
1387   assert(Depth <= MaxDepth && "Limit Search Depth");
1388   unsigned BitWidth = KnownZero.getBitWidth();
1389 
1390   assert((V->getType()->isIntOrIntVectorTy() ||
1391           V->getType()->isFPOrFPVectorTy() ||
1392           V->getType()->getScalarType()->isPointerTy()) &&
1393          "Not integer, floating point, or pointer type!");
1394   assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1395          (!V->getType()->isIntOrIntVectorTy() ||
1396           V->getType()->getScalarSizeInBits() == BitWidth) &&
1397          KnownZero.getBitWidth() == BitWidth &&
1398          KnownOne.getBitWidth() == BitWidth &&
1399          "V, KnownOne and KnownZero should have same BitWidth");
1400 
1401   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1402     // We know all of the bits for a constant!
1403     KnownOne = CI->getValue();
1404     KnownZero = ~KnownOne;
1405     return;
1406   }
1407   // Null and aggregate-zero are all-zeros.
1408   if (isa<ConstantPointerNull>(V) ||
1409       isa<ConstantAggregateZero>(V)) {
1410     KnownOne.clearAllBits();
1411     KnownZero = APInt::getAllOnesValue(BitWidth);
1412     return;
1413   }
1414   // Handle a constant vector by taking the intersection of the known bits of
1415   // each element.  There is no real need to handle ConstantVector here, because
1416   // we don't handle undef in any particularly useful way.
1417   if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1418     // We know that CDS must be a vector of integers. Take the intersection of
1419     // each element.
1420     KnownZero.setAllBits(); KnownOne.setAllBits();
1421     APInt Elt(KnownZero.getBitWidth(), 0);
1422     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1423       Elt = CDS->getElementAsInteger(i);
1424       KnownZero &= ~Elt;
1425       KnownOne &= Elt;
1426     }
1427     return;
1428   }
1429 
1430   // Start out not knowing anything.
1431   KnownZero.clearAllBits(); KnownOne.clearAllBits();
1432 
1433   // Limit search depth.
1434   // All recursive calls that increase depth must come after this.
1435   if (Depth == MaxDepth)
1436     return;
1437 
1438   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1439   // the bits of its aliasee.
1440   if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1441     if (!GA->mayBeOverridden())
1442       computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
1443     return;
1444   }
1445 
1446   if (Operator *I = dyn_cast<Operator>(V))
1447     computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
1448 
1449   // Aligned pointers have trailing zeros - refine KnownZero set
1450   if (V->getType()->isPointerTy()) {
1451     unsigned Align = V->getPointerAlignment(Q.DL);
1452     if (Align)
1453       KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1454   }
1455 
  // computeKnownBitsFromAssume strictly refines KnownZero and
  // KnownOne. Therefore, we run it after computeKnownBitsFromOperator.
1458 
1459   // Check whether a nearby assume intrinsic can determine some known bits.
1460   computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);
1461 
1462   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1463 }
1464 
1465 /// Determine whether the sign bit is known to be zero or one.
1466 /// Convenience wrapper around computeKnownBits.
1467 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
1468                     unsigned Depth, const Query &Q) {
1469   unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
1470   if (!BitWidth) {
1471     KnownZero = false;
1472     KnownOne = false;
1473     return;
1474   }
1475   APInt ZeroBits(BitWidth, 0);
1476   APInt OneBits(BitWidth, 0);
1477   computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
1478   KnownOne = OneBits[BitWidth - 1];
1479   KnownZero = ZeroBits[BitWidth - 1];
1480 }
1481 
1482 /// Return true if the given value is known to have exactly one
1483 /// bit set when defined. For vectors return true if every element is known to
1484 /// be a power of two when defined. Supports values with integer or pointer
1485 /// types and vectors of integers.
1486 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
1487                             const Query &Q) {
1488   if (Constant *C = dyn_cast<Constant>(V)) {
1489     if (C->isNullValue())
1490       return OrZero;
1491     if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1492       return CI->getValue().isPowerOf2();
1493     // TODO: Handle vector constants.
1494   }
1495 
1496   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1497   // it is shifted off the end then the result is undefined.
1498   if (match(V, m_Shl(m_One(), m_Value())))
1499     return true;
1500 
  // (signbit) >>l X is clearly a power of two if the set bit is not shifted
  // off the bottom.  If it is shifted off the bottom then the result is
  // undefined.
1503   if (match(V, m_LShr(m_SignBit(), m_Value())))
1504     return true;
1505 
1506   // The remaining tests are all recursive, so bail out if we hit the limit.
1507   if (Depth++ == MaxDepth)
1508     return false;
1509 
1510   Value *X = nullptr, *Y = nullptr;
1511   // A shift left or a logical shift right of a power of two is a power of two
1512   // or zero.
1513   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1514                  match(V, m_LShr(m_Value(X), m_Value()))))
1515     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1516 
1517   if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1518     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1519 
1520   if (SelectInst *SI = dyn_cast<SelectInst>(V))
1521     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1522            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1523 
1524   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1525     // A power of two and'd with anything is a power of two or zero.
1526     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1527         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1528       return true;
1529     // X & (-X) is always a power of two or zero.
1530     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1531       return true;
1532     return false;
1533   }
1534 
1535   // Adding a power-of-two or zero to the same power-of-two or zero yields
1536   // either the original power-of-two, a larger power-of-two or zero.
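  // For example, if X and Y are each either 8 or 0, then X + Y is 16, 8 or 0,
  // all of which are still a power of two or zero.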
1537   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1538     OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1539     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1540       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1541           match(X, m_And(m_Value(), m_Specific(Y))))
1542         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1543           return true;
1544       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1545           match(Y, m_And(m_Value(), m_Specific(X))))
1546         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1547           return true;
1548 
1549       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1550       APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1551       computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);
1552 
1553       APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1554       computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
1555       // If i8 V is a power of two or zero:
1556       //  ZeroBits: 1 1 1 0 1 1 1 1
1557       // ~ZeroBits: 0 0 0 1 0 0 0 0
1558       if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1559         // If OrZero isn't set, we cannot give back a zero result.
1560         // Make sure either the LHS or RHS has a bit set.
1561         if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1562           return true;
1563     }
1564   }
1565 
1566   // An exact divide or right shift can only shift off zero bits, so the result
1567   // is a power of two only if the first operand is a power of two and not
1568   // copying a sign bit (sdiv int_min, 2).
1569   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1570       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1571     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1572                                   Depth, Q);
1573   }
1574 
1575   return false;
1576 }
1577 
1578 /// \brief Test whether a GEP's result is known to be non-null.
1579 ///
1580 /// Uses properties inherent in a GEP to try to determine whether it is known
1581 /// to be non-null.
1582 ///
1583 /// Currently this routine does not support vector GEPs.
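///
/// For example, an inbounds GEP in address space zero such as
///   getelementptr inbounds {i32, i32}, {i32, i32}* %p, i64 0, i32 1
/// cannot produce null, because the second field lives at a non-zero offset
/// and reaching null that way would violate the inbounds contract.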
1584 static bool isGEPKnownNonNull(GEPOperator *GEP, unsigned Depth,
1585                               const Query &Q) {
1586   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1587     return false;
1588 
1589   // FIXME: Support vector-GEPs.
1590   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1591 
1592   // If the base pointer is non-null, we cannot walk to a null address with an
1593   // inbounds GEP in address space zero.
1594   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1595     return true;
1596 
1597   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1598   // If so, then the GEP cannot produce a null pointer, as doing so would
1599   // inherently violate the inbounds contract within address space zero.
1600   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1601        GTI != GTE; ++GTI) {
1602     // Struct types are easy -- they must always be indexed by a constant.
1603     if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1604       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1605       unsigned ElementIdx = OpC->getZExtValue();
1606       const StructLayout *SL = Q.DL.getStructLayout(STy);
1607       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1608       if (ElementOffset > 0)
1609         return true;
1610       continue;
1611     }
1612 
1613     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1614     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1615       continue;
1616 
1617     // Fast path the constant operand case both for efficiency and so we don't
1618     // increment Depth when just zipping down an all-constant GEP.
1619     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1620       if (!OpC->isZero())
1621         return true;
1622       continue;
1623     }
1624 
1625     // We post-increment Depth here because while isKnownNonZero increments it
1626     // as well, when we pop back up that increment won't persist. We don't want
1627     // to recurse 10k times just because we have 10k GEP operands. We don't
1628     // bail completely out because we want to handle constant GEPs regardless
1629     // of depth.
1630     if (Depth++ >= MaxDepth)
1631       continue;
1632 
1633     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1634       return true;
1635   }
1636 
1637   return false;
1638 }
1639 
/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value?
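///
/// For example, the operand list { i8 1, i8 10, i8 64, i8 100 } describes the
/// union of [1, 10) and [64, 100); querying it for the value 0 returns true,
/// while querying it for the value 5 returns false.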
1643 static bool rangeMetadataExcludesValue(MDNode* Ranges,
1644                                        const APInt& Value) {
1645   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1646   assert(NumRanges >= 1);
1647   for (unsigned i = 0; i < NumRanges; ++i) {
1648     ConstantInt *Lower =
1649         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1650     ConstantInt *Upper =
1651         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1652     ConstantRange Range(Lower->getValue(), Upper->getValue());
1653     if (Range.contains(Value))
1654       return false;
1655   }
1656   return true;
1657 }
1658 
1659 /// Return true if the given value is known to be non-zero when defined.
1660 /// For vectors return true if every element is known to be non-zero when
1661 /// defined. Supports values with integer or pointer type and vectors of
1662 /// integers.
1663 bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q) {
1664   if (Constant *C = dyn_cast<Constant>(V)) {
1665     if (C->isNullValue())
1666       return false;
1667     if (isa<ConstantInt>(C))
1668       // Must be non-zero due to null test above.
1669       return true;
1670     // TODO: Handle vectors
1671     return false;
1672   }
1673 
1674   if (Instruction* I = dyn_cast<Instruction>(V)) {
1675     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1676       // If the possible ranges don't contain zero, then the value is
1677       // definitely non-zero.
1678       if (IntegerType* Ty = dyn_cast<IntegerType>(V->getType())) {
1679         const APInt ZeroValue(Ty->getBitWidth(), 0);
1680         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1681           return true;
1682       }
1683     }
1684   }
1685 
1686   // The remaining tests are all recursive, so bail out if we hit the limit.
1687   if (Depth++ >= MaxDepth)
1688     return false;
1689 
1690   // Check for pointer simplifications.
1691   if (V->getType()->isPointerTy()) {
1692     if (isKnownNonNull(V))
1693       return true;
1694     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1695       if (isGEPKnownNonNull(GEP, Depth, Q))
1696         return true;
1697   }
1698 
1699   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1700 
1701   // X | Y != 0 if X != 0 or Y != 0.
1702   Value *X = nullptr, *Y = nullptr;
1703   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1704     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1705 
1706   // ext X != 0 if X != 0.
1707   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1708     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1709 
1710   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1711   // if the lowest bit is shifted off the end.
1712   if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1713     // shl nuw can't remove any non-zero bits.
1714     OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1715     if (BO->hasNoUnsignedWrap())
1716       return isKnownNonZero(X, Depth, Q);
1717 
1718     APInt KnownZero(BitWidth, 0);
1719     APInt KnownOne(BitWidth, 0);
1720     computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1721     if (KnownOne[0])
1722       return true;
1723   }
1724   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1725   // defined if the sign bit is shifted off the end.
1726   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1727     // shr exact can only shift out zero bits.
1728     PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1729     if (BO->isExact())
1730       return isKnownNonZero(X, Depth, Q);
1731 
1732     bool XKnownNonNegative, XKnownNegative;
1733     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1734     if (XKnownNegative)
1735       return true;
1736 
1737     // If the shifter operand is a constant, and all of the bits shifted
1738     // out are known to be zero, and X is known non-zero then at least one
1739     // non-zero bit must remain.
1740     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1741       APInt KnownZero(BitWidth, 0);
1742       APInt KnownOne(BitWidth, 0);
1743       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1744 
1745       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1746       // Is there a known one in the portion not shifted out?
1747       if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1748         return true;
1749       // Are all the bits to be shifted out known zero?
1750       if (KnownZero.countTrailingOnes() >= ShiftVal)
1751         return isKnownNonZero(X, Depth, Q);
1752     }
1753   }
1754   // div exact can only produce a zero if the dividend is zero.
1755   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1756     return isKnownNonZero(X, Depth, Q);
1757   }
1758   // X + Y.
1759   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1760     bool XKnownNonNegative, XKnownNegative;
1761     bool YKnownNonNegative, YKnownNegative;
1762     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1763     ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);
1764 
1765     // If X and Y are both non-negative (as signed values) then their sum is not
1766     // zero unless both X and Y are zero.
1767     if (XKnownNonNegative && YKnownNonNegative)
1768       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1769         return true;
1770 
1771     // If X and Y are both negative (as signed values) then their sum is not
1772     // zero unless both X and Y equal INT_MIN.
1773     if (BitWidth && XKnownNegative && YKnownNegative) {
1774       APInt KnownZero(BitWidth, 0);
1775       APInt KnownOne(BitWidth, 0);
1776       APInt Mask = APInt::getSignedMaxValue(BitWidth);
1777       // The sign bit of X is set.  If some other bit is set then X is not equal
1778       // to INT_MIN.
1779       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1780       if ((KnownOne & Mask) != 0)
1781         return true;
1782       // The sign bit of Y is set.  If some other bit is set then Y is not equal
1783       // to INT_MIN.
1784       computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
1785       if ((KnownOne & Mask) != 0)
1786         return true;
1787     }
1788 
1789     // The sum of a non-negative number and a power of two is not zero.
1790     if (XKnownNonNegative &&
1791         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1792       return true;
1793     if (YKnownNonNegative &&
1794         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1795       return true;
1796   }
1797   // X * Y.
1798   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1799     OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1800     // If X and Y are non-zero then so is X * Y as long as the multiplication
1801     // does not overflow.
1802     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1803         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1804       return true;
1805   }
1806   // (C ? X : Y) != 0 if X != 0 and Y != 0.
1807   else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1808     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1809         isKnownNonZero(SI->getFalseValue(), Depth, Q))
1810       return true;
1811   }
1812   // PHI
1813   else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try to detect a recurrence that monotonically increases from a
    // starting value, as these are commonly used as induction variables.
1816     if (PN->getNumIncomingValues() == 2) {
1817       Value *Start = PN->getIncomingValue(0);
1818       Value *Induction = PN->getIncomingValue(1);
1819       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1820         std::swap(Start, Induction);
1821       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1822         if (!C->isZero() && !C->isNegative()) {
1823           ConstantInt *X;
1824           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1825                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1826               !X->isNegative())
1827             return true;
1828         }
1829       }
1830     }
    // Check if all incoming values are non-zero constants.
1832     bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1833       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1834     });
1835     if (AllNonZeroConstants)
1836       return true;
1837   }
1838 
1839   if (!BitWidth) return false;
1840   APInt KnownZero(BitWidth, 0);
1841   APInt KnownOne(BitWidth, 0);
1842   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1843   return KnownOne != 0;
1844 }
1845 
/// Return true if V1 == V2 + X, where X is known non-zero.
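/// For example, if V1 is 'add i32 %v2, 1' and V2 is '%v2', this returns true
/// because the constant 1 is known non-zero.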
1847 static bool isAddOfNonZero(Value *V1, Value *V2, const Query &Q) {
1848   BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
1849   if (!BO || BO->getOpcode() != Instruction::Add)
1850     return false;
1851   Value *Op = nullptr;
1852   if (V2 == BO->getOperand(0))
1853     Op = BO->getOperand(1);
1854   else if (V2 == BO->getOperand(1))
1855     Op = BO->getOperand(0);
1856   else
1857     return false;
1858   return isKnownNonZero(Op, 0, Q);
1859 }
1860 
1861 /// Return true if it is known that V1 != V2.
1862 static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q) {
1863   if (V1->getType()->isVectorTy() || V1 == V2)
1864     return false;
1865   if (V1->getType() != V2->getType())
1866     // We can't look through casts yet.
1867     return false;
1868   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
1869     return true;
1870 
1871   if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
1872     // Are any known bits in V1 contradictory to known bits in V2? If V1
1873     // has a known zero where V2 has a known one, they must not be equal.
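    // For example, (X << 1) has its low bit known zero while (Y | 1) has its
    // low bit known one, so the two values can never be equal.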
1874     auto BitWidth = Ty->getBitWidth();
1875     APInt KnownZero1(BitWidth, 0);
1876     APInt KnownOne1(BitWidth, 0);
1877     computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
1878     APInt KnownZero2(BitWidth, 0);
1879     APInt KnownOne2(BitWidth, 0);
1880     computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);
1881 
1882     auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
1883     if (OppositeBits.getBoolValue())
1884       return true;
1885   }
1886   return false;
1887 }
1888 
1889 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
1890 /// simplify operations downstream. Mask is known to be zero for bits that V
1891 /// cannot have.
1892 ///
1893 /// This function is defined on values with integer type, values with pointer
1894 /// type, and vectors of integers.  In the case
1895 /// where V is a vector, the mask, known zero, and known one values are the
1896 /// same width as the vector element, and the bit is set only if it is true
1897 /// for all of the elements in the vector.
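///
/// For example, if V is known to be a multiple of 4 (its two low bits are
/// known zero), then a query with Mask == 3 returns true.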
1898 bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
1899                        const Query &Q) {
1900   APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
1901   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1902   return (KnownZero & Mask) == Mask;
1903 }
1904 
1905 
1906 
1907 /// Return the number of times the sign bit of the register is replicated into
1908 /// the other bits. We know that at least 1 bit is always equal to the sign bit
1909 /// (itself), but other cases can give us information. For example, immediately
1910 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
1911 /// other, so we return 3.
1912 ///
/// 'V' must have a scalar integer type.
1914 ///
1915 unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q) {
1916   unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
1917   unsigned Tmp, Tmp2;
1918   unsigned FirstAnswer = 1;
1919 
1920   // Note that ConstantInt is handled by the general computeKnownBits case
1921   // below.
1922 
1923   if (Depth == 6)
1924     return 1;  // Limit search depth.
1925 
1926   Operator *U = dyn_cast<Operator>(V);
1927   switch (Operator::getOpcode(V)) {
1928   default: break;
1929   case Instruction::SExt:
1930     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
1931     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
1932 
1933   case Instruction::SDiv: {
1934     const APInt *Denominator;
1935     // sdiv X, C -> adds log(C) sign bits.
1936     if (match(U->getOperand(1), m_APInt(Denominator))) {
1937 
1938       // Ignore non-positive denominator.
1939       if (!Denominator->isStrictlyPositive())
1940         break;
1941 
1942       // Calculate the incoming numerator bits.
1943       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
1944 
1945       // Add floor(log(C)) bits to the numerator bits.
1946       return std::min(TyBits, NumBits + Denominator->logBase2());
1947     }
1948     break;
1949   }
1950 
1951   case Instruction::SRem: {
1952     const APInt *Denominator;
1953     // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant.  This lets us put a lower bound on the number of sign
1955     // bits.
1956     if (match(U->getOperand(1), m_APInt(Denominator))) {
1957 
1958       // Ignore non-positive denominator.
1959       if (!Denominator->isStrictlyPositive())
1960         break;
1961 
1962       // Calculate the incoming numerator bits. SRem by a positive constant
1963       // can't lower the number of sign bits.
1964       unsigned NumrBits =
1965           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
1966 
1967       // Calculate the leading sign bit constraints by examining the
1968       // denominator.  Given that the denominator is positive, there are two
1969       // cases:
1970       //
1971       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
1972       //     (1 << ceilLogBase2(C)).
1973       //
1974       //  2. the numerator is negative.  Then the result range is (-C,0] and
1975       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
1976       //
1977       // Thus a lower bound on the number of sign bits is `TyBits -
1978       // ceilLogBase2(C)`.
1979 
1980       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
1981       return std::max(NumrBits, ResBits);
1982     }
1983     break;
1984   }
1985 
1986   case Instruction::AShr: {
1987     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
1988     // ashr X, C   -> adds C sign bits.  Vectors too.
1989     const APInt *ShAmt;
1990     if (match(U->getOperand(1), m_APInt(ShAmt))) {
1991       Tmp += ShAmt->getZExtValue();
1992       if (Tmp > TyBits) Tmp = TyBits;
1993     }
1994     return Tmp;
1995   }
1996   case Instruction::Shl: {
1997     const APInt *ShAmt;
1998     if (match(U->getOperand(1), m_APInt(ShAmt))) {
1999       // shl destroys sign bits.
2000       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2001       Tmp2 = ShAmt->getZExtValue();
2002       if (Tmp2 >= TyBits ||      // Bad shift.
2003           Tmp2 >= Tmp) break;    // Shifted all sign bits out.
2004       return Tmp - Tmp2;
2005     }
2006     break;
2007   }
2008   case Instruction::And:
2009   case Instruction::Or:
2010   case Instruction::Xor:    // NOT is handled here.
2011     // Logical binary ops preserve the number of sign bits at the worst.
2012     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2013     if (Tmp != 1) {
2014       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2015       FirstAnswer = std::min(Tmp, Tmp2);
2016       // We computed what we know about the sign bits as our first
2017       // answer. Now proceed to the generic code that uses
2018       // computeKnownBits, and pick whichever answer is better.
2019     }
2020     break;
2021 
2022   case Instruction::Select:
2023     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2024     if (Tmp == 1) return 1;  // Early out.
2025     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2026     return std::min(Tmp, Tmp2);
2027 
2028   case Instruction::Add:
2029     // Add can have at most one carry bit.  Thus we know that the output
2030     // is, at worst, one more bit than the inputs.
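    // For example, two i8 values that each have 3 sign bits lie in [-32, 31],
    // so their sum lies in [-64, 62] and still has at least 2 sign bits.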
2031     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2032     if (Tmp == 1) return 1;  // Early out.
2033 
2034     // Special case decrementing a value (ADD X, -1):
2035     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2036       if (CRHS->isAllOnesValue()) {
2037         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2038         computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
2039 
2040         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2041         // sign bits set.
2042         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2043           return TyBits;
2044 
2045         // If we are subtracting one from a positive number, there is no carry
2046         // out of the result.
2047         if (KnownZero.isNegative())
2048           return Tmp;
2049       }
2050 
2051     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2052     if (Tmp2 == 1) return 1;
2053     return std::min(Tmp, Tmp2)-1;
2054 
2055   case Instruction::Sub:
2056     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2057     if (Tmp2 == 1) return 1;
2058 
2059     // Handle NEG.
2060     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2061       if (CLHS->isNullValue()) {
2062         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2063         computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
2064         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2065         // sign bits set.
2066         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2067           return TyBits;
2068 
2069         // If the input is known to be positive (the sign bit is known clear),
2070         // the output of the NEG has the same number of sign bits as the input.
2071         if (KnownZero.isNegative())
2072           return Tmp2;
2073 
2074         // Otherwise, we treat this like a SUB.
2075       }
2076 
2077     // Sub can have at most one carry bit.  Thus we know that the output
2078     // is, at worst, one more bit than the inputs.
2079     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2080     if (Tmp == 1) return 1;  // Early out.
2081     return std::min(Tmp, Tmp2)-1;
2082 
2083   case Instruction::PHI: {
2084     PHINode *PN = cast<PHINode>(U);
2085     unsigned NumIncomingValues = PN->getNumIncomingValues();
2086     // Don't analyze large in-degree PHIs.
2087     if (NumIncomingValues > 4) break;
2088     // Unreachable blocks may have zero-operand PHI nodes.
2089     if (NumIncomingValues == 0) break;
2090 
2091     // Take the minimum of all incoming values.  This can't infinitely loop
2092     // because of our depth threshold.
2093     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2094     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2095       if (Tmp == 1) return Tmp;
2096       Tmp = std::min(
2097           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2098     }
2099     return Tmp;
2100   }
2101 
2102   case Instruction::Trunc:
2103     // FIXME: it's tricky to do anything useful for this, but it is an important
2104     // case for targets like X86.
2105     break;
2106   }
2107 
2108   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2109   // use this information.
2110   APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2111   APInt Mask;
2112   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2113 
2114   if (KnownZero.isNegative()) {        // sign bit is 0
2115     Mask = KnownZero;
2116   } else if (KnownOne.isNegative()) {  // sign bit is 1;
2117     Mask = KnownOne;
2118   } else {
2119     // Nothing known.
2120     return FirstAnswer;
2121   }
2122 
2123   // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
2124   // the number of identical bits in the top of the input value.
2125   Mask = ~Mask;
2126   Mask <<= Mask.getBitWidth()-TyBits;
2127   // Return # leading zeros.  We use 'min' here in case Val was zero before
2128   // shifting.  We don't want to return '64' as for an i32 "0".
2129   return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
2130 }
2131 
2132 /// This function computes the integer multiple of Base that equals V.
2133 /// If successful, it returns true and returns the multiple in
2134 /// Multiple. If unsuccessful, it returns false. It looks
2135 /// through SExt instructions only if LookThroughSExt is true.
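///
/// For example, for V = (shl i32 %x, 3) and Base = 8, Multiple is set to %x,
/// since V == 8 * %x.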
2136 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2137                            bool LookThroughSExt, unsigned Depth) {
2138   const unsigned MaxDepth = 6;
2139 
2140   assert(V && "No Value?");
2141   assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer type!");
2143 
2144   Type *T = V->getType();
2145 
2146   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2147 
2148   if (Base == 0)
2149     return false;
2150 
2151   if (Base == 1) {
2152     Multiple = V;
2153     return true;
2154   }
2155 
2156   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2157   Constant *BaseVal = ConstantInt::get(T, Base);
2158   if (CO && CO == BaseVal) {
2159     // Multiple is 1.
2160     Multiple = ConstantInt::get(T, 1);
2161     return true;
2162   }
2163 
2164   if (CI && CI->getZExtValue() % Base == 0) {
2165     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2166     return true;
2167   }
2168 
2169   if (Depth == MaxDepth) return false;  // Limit search depth.
2170 
2171   Operator *I = dyn_cast<Operator>(V);
2172   if (!I) return false;
2173 
2174   switch (I->getOpcode()) {
2175   default: break;
2176   case Instruction::SExt:
2177     if (!LookThroughSExt) return false;
2178     // otherwise fall through to ZExt
2179   case Instruction::ZExt:
2180     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2181                            LookThroughSExt, Depth+1);
2182   case Instruction::Shl:
2183   case Instruction::Mul: {
2184     Value *Op0 = I->getOperand(0);
2185     Value *Op1 = I->getOperand(1);
2186 
2187     if (I->getOpcode() == Instruction::Shl) {
2188       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2189       if (!Op1CI) return false;
2190       // Turn Op0 << Op1 into Op0 * 2^Op1
2191       APInt Op1Int = Op1CI->getValue();
2192       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2193       APInt API(Op1Int.getBitWidth(), 0);
2194       API.setBit(BitToSet);
2195       Op1 = ConstantInt::get(V->getContext(), API);
2196     }
2197 
2198     Value *Mul0 = nullptr;
2199     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2200       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2201         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2202           if (Op1C->getType()->getPrimitiveSizeInBits() <
2203               MulC->getType()->getPrimitiveSizeInBits())
2204             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2205           if (Op1C->getType()->getPrimitiveSizeInBits() >
2206               MulC->getType()->getPrimitiveSizeInBits())
2207             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2208 
2209           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2210           Multiple = ConstantExpr::getMul(MulC, Op1C);
2211           return true;
2212         }
2213 
2214       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2215         if (Mul0CI->getValue() == 1) {
2216           // V == Base * Op1, so return Op1
2217           Multiple = Op1;
2218           return true;
2219         }
2220     }
2221 
2222     Value *Mul1 = nullptr;
2223     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2224       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2225         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2226           if (Op0C->getType()->getPrimitiveSizeInBits() <
2227               MulC->getType()->getPrimitiveSizeInBits())
2228             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2229           if (Op0C->getType()->getPrimitiveSizeInBits() >
2230               MulC->getType()->getPrimitiveSizeInBits())
2231             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2232 
2233           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2234           Multiple = ConstantExpr::getMul(MulC, Op0C);
2235           return true;
2236         }
2237 
2238       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2239         if (Mul1CI->getValue() == 1) {
2240           // V == Base * Op0, so return Op0
2241           Multiple = Op0;
2242           return true;
2243         }
2244     }
2245   }
2246   }
2247 
2248   // We could not determine if V is a multiple of Base.
2249   return false;
2250 }
2251 
2252 /// Return true if we can prove that the specified FP value is never equal to
2253 /// -0.0.
2254 ///
2255 /// NOTE: this function will need to be revisited when we support non-default
2256 /// rounding modes!
2257 ///
2258 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
2259   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2260     return !CFP->getValueAPF().isNegZero();
2261 
2262   // FIXME: Magic number! At the least, this should be given a name because it's
2263   // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
2264   // expose it as a parameter, so it can be used for testing / experimenting.
2265   if (Depth == 6)
2266     return false;  // Limit search depth.
2267 
2268   const Operator *I = dyn_cast<Operator>(V);
2269   if (!I) return false;
2270 
2271   // Check if the nsz fast-math flag is set
2272   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2273     if (FPO->hasNoSignedZeros())
2274       return true;
2275 
2276   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2277   if (I->getOpcode() == Instruction::FAdd)
2278     if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2279       if (CFP->isNullValue())
2280         return true;
2281 
2282   // sitofp and uitofp turn into +0.0 for zero.
2283   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2284     return true;
2285 
2286   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2287     // sqrt(-0.0) = -0.0, no other negative results are possible.
2288     if (II->getIntrinsicID() == Intrinsic::sqrt)
2289       return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
2290 
2291   if (const CallInst *CI = dyn_cast<CallInst>(I))
2292     if (const Function *F = CI->getCalledFunction()) {
2293       if (F->isDeclaration()) {
2294         // abs(x) != -0.0
2295         if (F->getName() == "abs") return true;
2296         // fabs[lf](x) != -0.0
2297         if (F->getName() == "fabs") return true;
2298         if (F->getName() == "fabsf") return true;
2299         if (F->getName() == "fabsl") return true;
2300         if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
2301             F->getName() == "sqrtl")
2302           return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
2303       }
2304     }
2305 
2306   return false;
2307 }
2308 
2309 bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) {
2310   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2311     return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
2312 
2313   // FIXME: Magic number! At the least, this should be given a name because it's
2314   // used similarly in CannotBeNegativeZero(). A better fix may be to
2315   // expose it as a parameter, so it can be used for testing / experimenting.
2316   if (Depth == 6)
2317     return false;  // Limit search depth.
2318 
2319   const Operator *I = dyn_cast<Operator>(V);
2320   if (!I) return false;
2321 
2322   switch (I->getOpcode()) {
2323   default: break;
2324   // Unsigned integers are always nonnegative.
2325   case Instruction::UIToFP:
2326     return true;
2327   case Instruction::FMul:
2328     // x*x is always non-negative or a NaN.
2329     if (I->getOperand(0) == I->getOperand(1))
2330       return true;
2331     // Fall through
2332   case Instruction::FAdd:
2333   case Instruction::FDiv:
2334   case Instruction::FRem:
2335     return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) &&
2336            CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
2337   case Instruction::Select:
2338     return CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1) &&
2339            CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
2340   case Instruction::FPExt:
2341   case Instruction::FPTrunc:
2342     // Widening/narrowing never change sign.
2343     return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
2344   case Instruction::Call:
2345     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2346       switch (II->getIntrinsicID()) {
2347       default: break;
2348       case Intrinsic::maxnum:
2349         return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) ||
2350                CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
2351       case Intrinsic::minnum:
2352         return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) &&
2353                CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
2354       case Intrinsic::exp:
2355       case Intrinsic::exp2:
2356       case Intrinsic::fabs:
2357       case Intrinsic::sqrt:
2358         return true;
2359       case Intrinsic::powi:
2360         if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2361           // powi(x,n) is non-negative if n is even.
2362           if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2363             return true;
2364         }
2365         return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
2366       case Intrinsic::fma:
2367       case Intrinsic::fmuladd:
2368         // x*x+y is non-negative if y is non-negative.
2369         return I->getOperand(0) == I->getOperand(1) &&
2370                CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
2371       }
2372     break;
2373   }
2374   return false;
2375 }
2376 
2377 /// If the specified value can be set by repeating the same byte in memory,
2378 /// return the i8 value that it is represented with.  This is
2379 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2380 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2381 /// byte store (e.g. i16 0x1234), return null.
2382 Value *llvm::isBytewiseValue(Value *V) {
2383   // All byte-wide stores are splatable, even of arbitrary variables.
2384   if (V->getType()->isIntegerTy(8)) return V;
2385 
  // Handle 'null' ConstantAggregateZero etc.
2387   if (Constant *C = dyn_cast<Constant>(V))
2388     if (C->isNullValue())
2389       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2390 
2391   // Constant float and double values can be handled as integer values if the
2392   // corresponding integer value is "byteable".  An important case is 0.0.
2393   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2394     if (CFP->getType()->isFloatTy())
2395       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2396     if (CFP->getType()->isDoubleTy())
2397       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2398     // Don't handle long double formats, which have strange constraints.
2399   }
2400 
2401   // We can handle constant integers that are multiple of 8 bits.
2402   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2403     if (CI->getBitWidth() % 8 == 0) {
2404       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2405 
2406       if (!CI->getValue().isSplat(8))
2407         return nullptr;
2408       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2409     }
2410   }
2411 
2412   // A ConstantDataArray/Vector is splatable if all its members are equal and
2413   // also splatable.
2414   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2415     Value *Elt = CA->getElementAsConstant(0);
2416     Value *Val = isBytewiseValue(Elt);
2417     if (!Val)
2418       return nullptr;
2419 
2420     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2421       if (CA->getElementAsConstant(I) != Elt)
2422         return nullptr;
2423 
2424     return Val;
2425   }
2426 
2427   // Conceptually, we could handle things like:
2428   //   %a = zext i8 %X to i16
2429   //   %b = shl i16 %a, 8
2430   //   %c = or i16 %a, %b
2431   // but until there is an example that actually needs this, it doesn't seem
2432   // worth worrying about.
2433   return nullptr;
2434 }
2435 
2436 
2437 // This is the recursive version of BuildSubAggregate. It takes a few different
2438 // arguments. Idxs is the index within the nested struct From that we are
2439 // looking at now (which is of type IndexedType). IdxSkip is the number of
2440 // indices from Idxs that should be left out when inserting into the resulting
2441 // struct. To is the result struct built so far, new insertvalue instructions
2442 // build on that.
2443 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2444                                 SmallVectorImpl<unsigned> &Idxs,
2445                                 unsigned IdxSkip,
2446                                 Instruction *InsertBefore) {
2447   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2448   if (STy) {
2449     // Save the original To argument so we can modify it
2450     Value *OrigTo = To;
2451     // General case, the type indexed by Idxs is a struct
2452     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2453       // Process each struct element recursively
2454       Idxs.push_back(i);
2455       Value *PrevTo = To;
2456       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2457                              InsertBefore);
2458       Idxs.pop_back();
2459       if (!To) {
2460         // Couldn't find any inserted value for this index? Cleanup
2461         while (PrevTo != OrigTo) {
2462           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2463           PrevTo = Del->getAggregateOperand();
2464           Del->eraseFromParent();
2465         }
2466         // Stop processing elements
2467         break;
2468       }
2469     }
2470     // If we successfully found a value for each of our subaggregates
2471     if (To)
2472       return To;
2473   }
  // Base case, the type indexed by Idxs is not a struct, or not all of
2475   // the struct's elements had a value that was inserted directly. In the latter
2476   // case, perhaps we can't determine each of the subelements individually, but
2477   // we might be able to find the complete struct somewhere.
2478 
2479   // Find the value that is at that particular spot
2480   Value *V = FindInsertedValue(From, Idxs);
2481 
2482   if (!V)
2483     return nullptr;
2484 
  // Insert the value in the new (sub) aggregate
2486   return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2487                                        "tmp", InsertBefore);
2488 }
2489 
2490 // This helper takes a nested struct and extracts a part of it (which is again a
2491 // struct) into a new value. For example, given the struct:
2492 // { a, { b, { c, d }, e } }
2493 // and the indices "1, 1" this returns
2494 // { c, d }.
2495 //
2496 // It does this by inserting an insertvalue for each element in the resulting
2497 // struct, as opposed to just inserting a single struct. This will only work if
2498 // each of the elements of the substruct are known (ie, inserted into From by an
2499 // insertvalue instruction somewhere).
2500 //
2501 // All inserted insertvalue instructions are inserted before InsertBefore
2502 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2503                                 Instruction *InsertBefore) {
2504   assert(InsertBefore && "Must have someplace to insert!");
2505   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2506                                                              idx_range);
2507   Value *To = UndefValue::get(IndexedType);
2508   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2509   unsigned IdxSkip = Idxs.size();
2510 
2511   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2512 }
2513 
/// Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if it
/// were inserted directly into the aggregate.
2517 ///
2518 /// If InsertBefore is not null, this function will duplicate (modified)
2519 /// insertvalues when a part of a nested struct is extracted.
2520 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2521                                Instruction *InsertBefore) {
2522   // Nothing to index? Just return V then (this is useful at the end of our
2523   // recursion).
2524   if (idx_range.empty())
2525     return V;
2526   // We have indices, so V should have an indexable type.
2527   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2528          "Not looking at a struct or array?");
2529   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2530          "Invalid indices for type?");
2531 
2532   if (Constant *C = dyn_cast<Constant>(V)) {
2533     C = C->getAggregateElement(idx_range[0]);
2534     if (!C) return nullptr;
2535     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2536   }
2537 
2538   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Walk the indices of the insertvalue instruction in parallel with the
    // requested indices.
2541     const unsigned *req_idx = idx_range.begin();
2542     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2543          i != e; ++i, ++req_idx) {
2544       if (req_idx == idx_range.end()) {
2545         // We can't handle this without inserting insertvalues
2546         if (!InsertBefore)
2547           return nullptr;
2548 
2549         // The requested index identifies a part of a nested aggregate. Handle
2550         // this specially. For example,
2551         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2552         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2553         // %C = extractvalue {i32, { i32, i32 } } %B, 1
2554         // This can be changed into
2555         // %A = insertvalue {i32, i32 } undef, i32 10, 0
2556         // %C = insertvalue {i32, i32 } %A, i32 11, 1
2557         // which allows the unused 0,0 element from the nested struct to be
2558         // removed.
2559         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2560                                  InsertBefore);
2561       }
2562 
      // This insertvalue inserts something other than what we are looking for.
      // See if the (aggregate) value it inserts into has the value we are
      // looking for, then.
2566       if (*req_idx != *i)
2567         return FindInsertedValue(I->getAggregateOperand(), idx_range,
2568                                  InsertBefore);
2569     }
2570     // If we end up here, the indices of the insertvalue match with those
2571     // requested (though possibly only partially). Now we recursively look at
2572     // the inserted value, passing any remaining indices.
2573     return FindInsertedValue(I->getInsertedValueOperand(),
2574                              makeArrayRef(req_idx, idx_range.end()),
2575                              InsertBefore);
2576   }
2577 
2578   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2579     // If we're extracting a value from an aggregate that was extracted from
2580     // something else, we can extract from that something else directly instead.
2581     // However, we will need to chain I's indices with the requested indices.
2582 
2583     // Calculate the number of indices required
2584     unsigned size = I->getNumIndices() + idx_range.size();
2585     // Allocate some space to put the new indices in
2586     SmallVector<unsigned, 5> Idxs;
2587     Idxs.reserve(size);
2588     // Add indices from the extract value instruction
2589     Idxs.append(I->idx_begin(), I->idx_end());
2590 
2591     // Add requested indices
2592     Idxs.append(idx_range.begin(), idx_range.end());
2593 
    assert(Idxs.size() == size &&
           "Number of indices added not correct?");
2596 
2597     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2598   }
  // Otherwise, we don't know (e.g., extracting from a function return value
  // or a load instruction).
2601   return nullptr;
2602 }
2603 
2604 /// Analyze the specified pointer to see if it can be expressed as a base
2605 /// pointer plus a constant offset. Return the base and offset to the caller.
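///
/// For example (illustrative IR), given
///   %p = getelementptr inbounds i8, i8* %base, i64 12
/// this returns %base and sets Offset to 12, provided the GEP's offset can be
/// accumulated as a constant.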
2606 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2607                                               const DataLayout &DL) {
2608   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2609   APInt ByteOffset(BitWidth, 0);
2610 
2611   // We walk up the defs but use a visited set to handle unreachable code. In
2612   // that case, we stop after accumulating the cycle once (not that it
2613   // matters).
2614   SmallPtrSet<Value *, 16> Visited;
2615   while (Visited.insert(Ptr).second) {
2616     if (Ptr->getType()->isVectorTy())
2617       break;
2618 
2619     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2620       APInt GEPOffset(BitWidth, 0);
2621       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2622         break;
2623 
2624       ByteOffset += GEPOffset;
2625 
2626       Ptr = GEP->getPointerOperand();
2627     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2628                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2629       Ptr = cast<Operator>(Ptr)->getOperand(0);
2630     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2631       if (GA->mayBeOverridden())
2632         break;
2633       Ptr = GA->getAliasee();
2634     } else {
2635       break;
2636     }
2637   }
2638   Offset = ByteOffset.getSExtValue();
2639   return Ptr;
2640 }
2641 
2642 
/// If the pointer V points into a constant global string, extract the string's
/// contents starting at byte Offset into Str and return true; if TrimAtNul is
/// set, the string is truncated at the first nul byte. Returns false if the
/// string cannot be determined.
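///
/// For example (illustrative), with
///   @str = private unnamed_addr constant [6 x i8] c"hello\00"
/// getConstantStringInfo(@str, Str) sets Str to "hello" and returns true.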
2646 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2647                                  uint64_t Offset, bool TrimAtNul) {
2648   assert(V);
2649 
2650   // Look through bitcast instructions and geps.
2651   V = V->stripPointerCasts();
2652 
2653   // If the value is a GEP instruction or constant expression, treat it as an
2654   // offset.
2655   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2656     // Make sure the GEP has exactly three arguments.
2657     if (GEP->getNumOperands() != 3)
2658       return false;
2659 
    // Make sure the GEP's source element type is an array of i8.
2661     ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
2662     if (!AT || !AT->getElementType()->isIntegerTy(8))
2663       return false;
2664 
2665     // Check to make sure that the first operand of the GEP is an integer and
2666     // has value 0 so that we are sure we're indexing into the initializer.
2667     const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2668     if (!FirstIdx || !FirstIdx->isZero())
2669       return false;
2670 
2671     // If the second index isn't a ConstantInt, then this is a variable index
2672     // into the array.  If this occurs, we can't say anything meaningful about
2673     // the string.
2674     uint64_t StartIdx = 0;
2675     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
2676       StartIdx = CI->getZExtValue();
2677     else
2678       return false;
2679     return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
2680                                  TrimAtNul);
2681   }
2682 
  // The GEP, whether an instruction or a constant expression, must reference a
  // global variable that is a constant and has a definitive initializer. The
  // referenced constant initializer is the array that we'll use for the
  // optimization.
2686   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2687   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
2688     return false;
2689 
2690   // Handle the all-zeros case
2691   if (GV->getInitializer()->isNullValue()) {
2692     // This is a degenerate case. The initializer is constant zero so the
2693     // length of the string must be zero.
2694     Str = "";
2695     return true;
2696   }
2697 
  // The initializer must be a ConstantDataArray holding a string.
2699   const ConstantDataArray *Array =
2700     dyn_cast<ConstantDataArray>(GV->getInitializer());
2701   if (!Array || !Array->isString())
2702     return false;
2703 
2704   // Get the number of elements in the array
2705   uint64_t NumElts = Array->getType()->getArrayNumElements();
2706 
2707   // Start out with the entire array in the StringRef.
2708   Str = Array->getAsString();
2709 
2710   if (Offset > NumElts)
2711     return false;
2712 
2713   // Skip over 'offset' bytes.
2714   Str = Str.substr(Offset);
2715 
2716   if (TrimAtNul) {
    // Trim off the \0 and anything after it.  If the array is not nul
    // terminated, we just return the whole remainder of the string.  The
    // client may know some other way that the string is length-bound.
2720     Str = Str.substr(0, Str.find('\0'));
2721   }
2722   return true;
2723 }
2724 
// These next two are very similar to the above, but they also look through
// PHI nodes.
// TODO: See if we can integrate the two.
2728 
2729 /// If we can compute the length of the string pointed to by
2730 /// the specified pointer, return 'len+1'.  If we can't, return 0.
2731 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
2732   // Look through noop bitcast instructions.
2733   V = V->stripPointerCasts();
2734 
2735   // If this is a PHI node, there are two cases: either we have already seen it
2736   // or we haven't.
2737   if (PHINode *PN = dyn_cast<PHINode>(V)) {
2738     if (!PHIs.insert(PN).second)
2739       return ~0ULL;  // already in the set.
2740 
2741     // If it was new, see if all the input strings are the same length.
2742     uint64_t LenSoFar = ~0ULL;
2743     for (Value *IncValue : PN->incoming_values()) {
2744       uint64_t Len = GetStringLengthH(IncValue, PHIs);
2745       if (Len == 0) return 0; // Unknown length -> unknown.
2746 
2747       if (Len == ~0ULL) continue;
2748 
2749       if (Len != LenSoFar && LenSoFar != ~0ULL)
2750         return 0;    // Disagree -> unknown.
2751       LenSoFar = Len;
2752     }
2753 
2754     // Success, all agree.
2755     return LenSoFar;
2756   }
2757 
  // strlen(select(c,x,y)) -> strlen(x) if it matches strlen(y); a length of
  // ~0ULL (from a phi cycle) defers to the other operand.
2759   if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
2760     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
2761     if (Len1 == 0) return 0;
2762     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
2763     if (Len2 == 0) return 0;
2764     if (Len1 == ~0ULL) return Len2;
2765     if (Len2 == ~0ULL) return Len1;
2766     if (Len1 != Len2) return 0;
2767     return Len1;
2768   }
2769 
2770   // Otherwise, see if we can read the string.
2771   StringRef StrData;
2772   if (!getConstantStringInfo(V, StrData))
2773     return 0;
2774 
2775   return StrData.size()+1;
2776 }
2777 
2778 /// If we can compute the length of the string pointed to by
2779 /// the specified pointer, return 'len+1'.  If we can't, return 0.
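///
/// For example (illustrative), for a pointer to the constant string "abc\0"
/// this returns 4: three characters plus the nul terminator.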
2780 uint64_t llvm::GetStringLength(Value *V) {
2781   if (!V->getType()->isPointerTy()) return 0;
2782 
2783   SmallPtrSet<PHINode*, 32> PHIs;
2784   uint64_t Len = GetStringLengthH(V, PHIs);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
  // 1, the length of an empty string (just the nul terminator).
2787   return Len == ~0ULL ? 1 : Len;
2788 }
2789 
2790 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
2791 /// previous iteration of the loop was referring to the same object as \p PN.
2792 static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) {
2793   // Find the loop-defined value.
2794   Loop *L = LI->getLoopFor(PN->getParent());
2795   if (PN->getNumIncomingValues() != 2)
2796     return true;
2797 
2798   // Find the value from previous iteration.
2799   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
2800   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
2801     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
2802   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
2803     return true;
2804 
2805   // If a new pointer is loaded in the loop, the pointer references a different
2806   // object in every iteration.  E.g.:
2807   //    for (i)
2808   //       int *p = a[i];
2809   //       ...
2810   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
2811     if (!L->isLoopInvariant(Load->getPointerOperand()))
2812       return false;
2813   return true;
2814 }
2815 
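/// Look through GEPs, pointer casts, and aliases that cannot be overridden to
/// find the value a pointer is ultimately based on, taking at most MaxLookup
/// steps (0 means no limit). For example (illustrative IR), for
///   %p = getelementptr i32, i32* bitcast (i8* @g to i32*), i64 1
/// this returns @g.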
2816 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
2817                                  unsigned MaxLookup) {
2818   if (!V->getType()->isPointerTy())
2819     return V;
2820   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
2821     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2822       V = GEP->getPointerOperand();
2823     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
2824                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
2825       V = cast<Operator>(V)->getOperand(0);
2826     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2827       if (GA->mayBeOverridden())
2828         return V;
2829       V = GA->getAliasee();
2830     } else {
2831       // See if InstructionSimplify knows any relevant tricks.
2832       if (Instruction *I = dyn_cast<Instruction>(V))
2833         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
2834         if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
2835           V = Simplified;
2836           continue;
2837         }
2838 
2839       return V;
2840     }
2841     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
2842   }
2843   return V;
2844 }
2845 
2846 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
2847                                 const DataLayout &DL, LoopInfo *LI,
2848                                 unsigned MaxLookup) {
2849   SmallPtrSet<Value *, 4> Visited;
2850   SmallVector<Value *, 4> Worklist;
2851   Worklist.push_back(V);
2852   do {
2853     Value *P = Worklist.pop_back_val();
2854     P = GetUnderlyingObject(P, DL, MaxLookup);
2855 
2856     if (!Visited.insert(P).second)
2857       continue;
2858 
2859     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
2860       Worklist.push_back(SI->getTrueValue());
2861       Worklist.push_back(SI->getFalseValue());
2862       continue;
2863     }
2864 
2865     if (PHINode *PN = dyn_cast<PHINode>(P)) {
2866       // If this PHI changes the underlying object in every iteration of the
2867       // loop, don't look through it.  Consider:
2868       //   int **A;
2869       //   for (i) {
2870       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
2871       //     Curr = A[i];
      //     *Prev, *Curr;
      //   }
      //
2874       // Prev is tracking Curr one iteration behind so they refer to different
2875       // underlying objects.
2876       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
2877           isSameUnderlyingObjectInLoop(PN, LI))
2878         for (Value *IncValue : PN->incoming_values())
2879           Worklist.push_back(IncValue);
2880       continue;
2881     }
2882 
2883     Objects.push_back(P);
2884   } while (!Worklist.empty());
2885 }
2886 
2887 /// Return true if the only users of this pointer are lifetime markers.
2888 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
2889   for (const User *U : V->users()) {
2890     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2891     if (!II) return false;
2892 
2893     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
2894         II->getIntrinsicID() != Intrinsic::lifetime_end)
2895       return false;
2896   }
2897   return true;
2898 }
2899 
2900 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
2901                                         const Instruction *CtxI,
2902                                         const DominatorTree *DT,
2903                                         const TargetLibraryInfo *TLI) {
2904   const Operator *Inst = dyn_cast<Operator>(V);
2905   if (!Inst)
2906     return false;
2907 
2908   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
2909     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
2910       if (C->canTrap())
2911         return false;
2912 
2913   switch (Inst->getOpcode()) {
2914   default:
2915     return true;
2916   case Instruction::UDiv:
2917   case Instruction::URem: {
2918     // x / y is undefined if y == 0.
2919     const APInt *V;
2920     if (match(Inst->getOperand(1), m_APInt(V)))
2921       return *V != 0;
2922     return false;
2923   }
2924   case Instruction::SDiv:
2925   case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
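    // (Illustrative: for i8, -128 sdiv -1 would be +128, which is not
    // representable.)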
2927     const APInt *Numerator, *Denominator;
2928     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
2929       return false;
2930     // We cannot hoist this division if the denominator is 0.
2931     if (*Denominator == 0)
2932       return false;
2933     // It's safe to hoist if the denominator is not 0 or -1.
2934     if (*Denominator != -1)
2935       return true;
2936     // At this point we know that the denominator is -1.  It is safe to hoist as
2937     // long we know that the numerator is not INT_MIN.
2938     if (match(Inst->getOperand(0), m_APInt(Numerator)))
2939       return !Numerator->isMinSignedValue();
2940     // The numerator *might* be MinSignedValue.
2941     return false;
2942   }
2943   case Instruction::Load: {
2944     const LoadInst *LI = cast<LoadInst>(Inst);
2945     if (!LI->isUnordered() ||
2946         // Speculative load may create a race that did not exist in the source.
2947         LI->getParent()->getParent()->hasFnAttribute(
2948             Attribute::SanitizeThread) ||
2949         // Speculative load may load data from dirty regions.
2950         LI->getParent()->getParent()->hasFnAttribute(
2951             Attribute::SanitizeAddress))
2952       return false;
2953     const DataLayout &DL = LI->getModule()->getDataLayout();
2954     return isDereferenceableAndAlignedPointer(
2955         LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
2956   }
2957   case Instruction::Call: {
2958     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
2959       switch (II->getIntrinsicID()) {
2960       // These synthetic intrinsics have no side-effects and just mark
2961       // information about their operands.
      // FIXME: There are other no-op synthetic intrinsics that potentially
      // should be considered at least *safe* to speculate...
2964       case Intrinsic::dbg_declare:
2965       case Intrinsic::dbg_value:
2966         return true;
2967 
2968       case Intrinsic::bswap:
2969       case Intrinsic::ctlz:
2970       case Intrinsic::ctpop:
2971       case Intrinsic::cttz:
2972       case Intrinsic::objectsize:
2973       case Intrinsic::sadd_with_overflow:
2974       case Intrinsic::smul_with_overflow:
2975       case Intrinsic::ssub_with_overflow:
2976       case Intrinsic::uadd_with_overflow:
2977       case Intrinsic::umul_with_overflow:
2978       case Intrinsic::usub_with_overflow:
2979         return true;
2980       // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
2981       // errno like libm sqrt would.
2982       case Intrinsic::sqrt:
2983       case Intrinsic::fma:
2984       case Intrinsic::fmuladd:
2985       case Intrinsic::fabs:
2986       case Intrinsic::minnum:
2987       case Intrinsic::maxnum:
2988         return true;
2989       // TODO: some fp intrinsics are marked as having the same error handling
2990       // as libm. They're safe to speculate when they won't error.
2991       // TODO: are convert_{from,to}_fp16 safe?
2992       // TODO: can we list target-specific intrinsics here?
2993       default: break;
2994       }
2995     }
2996     return false; // The called function could have undefined behavior or
2997                   // side-effects, even if marked readnone nounwind.
2998   }
2999   case Instruction::VAArg:
3000   case Instruction::Alloca:
3001   case Instruction::Invoke:
3002   case Instruction::PHI:
3003   case Instruction::Store:
3004   case Instruction::Ret:
3005   case Instruction::Br:
3006   case Instruction::IndirectBr:
3007   case Instruction::Switch:
3008   case Instruction::Unreachable:
3009   case Instruction::Fence:
3010   case Instruction::AtomicRMW:
3011   case Instruction::AtomicCmpXchg:
3012   case Instruction::LandingPad:
3013   case Instruction::Resume:
3014   case Instruction::CatchSwitch:
3015   case Instruction::CatchPad:
3016   case Instruction::CatchRet:
3017   case Instruction::CleanupPad:
3018   case Instruction::CleanupRet:
3019     return false; // Misc instructions which have effects
3020   }
3021 }
3022 
3023 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3024   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3025 }
3026 
3027 /// Return true if we know that the specified value is never null.
3028 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
3029   assert(V->getType()->isPointerTy() && "V must be pointer type");
3030 
3031   // Alloca never returns null, malloc might.
3032   if (isa<AllocaInst>(V)) return true;
3033 
3034   // A byval, inalloca, or nonnull argument is never null.
3035   if (const Argument *A = dyn_cast<Argument>(V))
3036     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3037 
  // A global variable in address space 0 is non-null unless it has extern_weak
  // linkage. Other address spaces may have null as a valid address for a
  // global, so we can't assume anything there.
3041   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3042     return !GV->hasExternalWeakLinkage() &&
3043            GV->getType()->getAddressSpace() == 0;
3044 
  // A load tagged with !nonnull metadata is never null.
3046   if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3047     return LI->getMetadata(LLVMContext::MD_nonnull);
3048 
3049   if (auto CS = ImmutableCallSite(V))
3050     if (CS.isReturnNonNull())
3051       return true;
3052 
3053   return false;
3054 }
3055 
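/// Return true if a condition dominating \p CtxI proves that \p V is non-null,
/// by scanning V's users for an icmp against null that controls a branch.
/// For example (illustrative IR):
///   %c = icmp ne i8* %p, null
///   br i1 %c, label %notnull, label %isnull
/// then %p is known non-null in blocks dominated by the edge into %notnull.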
3056 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3057                                                   const Instruction *CtxI,
3058                                                   const DominatorTree *DT) {
3059   assert(V->getType()->isPointerTy() && "V must be pointer type");
3060 
3061   unsigned NumUsesExplored = 0;
3062   for (auto U : V->users()) {
3063     // Avoid massive lists
3064     if (NumUsesExplored >= DomConditionsMaxUses)
3065       break;
3066     NumUsesExplored++;
3067     // Consider only compare instructions uniquely controlling a branch
3068     const ICmpInst *Cmp = dyn_cast<ICmpInst>(U);
3069     if (!Cmp)
3070       continue;
3071 
3072     for (auto *CmpU : Cmp->users()) {
3073       const BranchInst *BI = dyn_cast<BranchInst>(CmpU);
3074       if (!BI)
3075         continue;
3076 
3077       assert(BI->isConditional() && "uses a comparison!");
3078 
3079       BasicBlock *NonNullSuccessor = nullptr;
3080       CmpInst::Predicate Pred;
3081 
3082       if (match(const_cast<ICmpInst*>(Cmp),
3083                 m_c_ICmp(Pred, m_Specific(V), m_Zero()))) {
3084         if (Pred == ICmpInst::ICMP_EQ)
3085           NonNullSuccessor = BI->getSuccessor(1);
3086         else if (Pred == ICmpInst::ICMP_NE)
3087           NonNullSuccessor = BI->getSuccessor(0);
3088       }
3089 
3090       if (NonNullSuccessor) {
3091         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3092         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3093           return true;
3094       }
3095     }
3096   }
3097 
3098   return false;
3099 }
3100 
3101 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3102                    const DominatorTree *DT, const TargetLibraryInfo *TLI) {
3103   if (isKnownNonNull(V, TLI))
3104     return true;
3105 
3106   return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
3107 }
3108 
3109 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
3110                                                    const DataLayout &DL,
3111                                                    AssumptionCache *AC,
3112                                                    const Instruction *CxtI,
3113                                                    const DominatorTree *DT) {
  // Multiplying a value with n significant bits by a value with m significant
  // bits yields a result with at most n + m significant bits. If n + m does
  // not exceed the result bit width, there can be no overflow.
  // This means that if the operands have enough leading zero bits between
  // them, we can guarantee that the result does not overflow.
3119   // Ref: "Hacker's Delight" by Henry Warren
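  // For example (illustrative): with 8-bit operands whose top four bits are
  // each known zero, both values are at most 15, so the product is at most
  // 225, which still fits in 8 bits.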
3120   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3121   APInt LHSKnownZero(BitWidth, 0);
3122   APInt LHSKnownOne(BitWidth, 0);
3123   APInt RHSKnownZero(BitWidth, 0);
3124   APInt RHSKnownOne(BitWidth, 0);
3125   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3126                    DT);
3127   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3128                    DT);
3129   // Note that underestimating the number of zero bits gives a more
3130   // conservative answer.
3131   unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3132                       RHSKnownZero.countLeadingOnes();
3133   // First handle the easy case: if we have enough zero bits there's
3134   // definitely no overflow.
3135   if (ZeroBits >= BitWidth)
3136     return OverflowResult::NeverOverflows;
3137 
3138   // Get the largest possible values for each operand.
3139   APInt LHSMax = ~LHSKnownZero;
3140   APInt RHSMax = ~RHSKnownZero;
3141 
3142   // We know the multiply operation doesn't overflow if the maximum values for
3143   // each operand will not overflow after we multiply them together.
3144   bool MaxOverflow;
3145   LHSMax.umul_ov(RHSMax, MaxOverflow);
3146   if (!MaxOverflow)
3147     return OverflowResult::NeverOverflows;
3148 
3149   // We know it always overflows if multiplying the smallest possible values for
3150   // the operands also results in overflow.
3151   bool MinOverflow;
3152   LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3153   if (MinOverflow)
3154     return OverflowResult::AlwaysOverflows;
3155 
3156   return OverflowResult::MayOverflow;
3157 }
3158 
3159 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3160                                                    const DataLayout &DL,
3161                                                    AssumptionCache *AC,
3162                                                    const Instruction *CxtI,
3163                                                    const DominatorTree *DT) {
3164   bool LHSKnownNonNegative, LHSKnownNegative;
3165   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3166                  AC, CxtI, DT);
3167   if (LHSKnownNonNegative || LHSKnownNegative) {
3168     bool RHSKnownNonNegative, RHSKnownNegative;
3169     ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3170                    AC, CxtI, DT);
3171 
3172     if (LHSKnownNegative && RHSKnownNegative) {
      // The sign bit is set in both cases: this MUST overflow.
3175       return OverflowResult::AlwaysOverflows;
3176     }
3177 
3178     if (LHSKnownNonNegative && RHSKnownNonNegative) {
      // The sign bit is clear in both cases: this CANNOT overflow.
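      // (Illustrative: for i8, both operands are then at most 127, so the
      // unsigned sum is at most 254 and cannot wrap past 255.)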
3181       return OverflowResult::NeverOverflows;
3182     }
3183   }
3184 
3185   return OverflowResult::MayOverflow;
3186 }
3187 
3188 static OverflowResult computeOverflowForSignedAdd(
3189     Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
3190     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
3191   if (Add && Add->hasNoSignedWrap()) {
3192     return OverflowResult::NeverOverflows;
3193   }
3194 
3195   bool LHSKnownNonNegative, LHSKnownNegative;
3196   bool RHSKnownNonNegative, RHSKnownNegative;
3197   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3198                  AC, CxtI, DT);
3199   ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3200                  AC, CxtI, DT);
3201 
3202   if ((LHSKnownNonNegative && RHSKnownNegative) ||
3203       (LHSKnownNegative && RHSKnownNonNegative)) {
3204     // The sign bits are opposite: this CANNOT overflow.
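    // (Illustrative: for i8, adding a value in [-128, -1] to a value in
    // [0, 127] gives a result in [-128, 126], which is always representable.)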
3205     return OverflowResult::NeverOverflows;
3206   }
3207 
  // The remaining code needs Add to be available. Return early if it is not.
3209   if (!Add)
3210     return OverflowResult::MayOverflow;
3211 
3212   // If the sign of Add is the same as at least one of the operands, this add
3213   // CANNOT overflow. This is particularly useful when the sum is
3214   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3215   // operands.
3216   bool LHSOrRHSKnownNonNegative =
3217       (LHSKnownNonNegative || RHSKnownNonNegative);
3218   bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3219   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3220     bool AddKnownNonNegative, AddKnownNegative;
3221     ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3222                    /*Depth=*/0, AC, CxtI, DT);
3223     if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3224         (AddKnownNegative && LHSOrRHSKnownNegative)) {
3225       return OverflowResult::NeverOverflows;
3226     }
3227   }
3228 
3229   return OverflowResult::MayOverflow;
3230 }
3231 
3232 OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
3233                                                  const DataLayout &DL,
3234                                                  AssumptionCache *AC,
3235                                                  const Instruction *CxtI,
3236                                                  const DominatorTree *DT) {
3237   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3238                                        Add, DL, AC, CxtI, DT);
3239 }
3240 
3241 OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
3242                                                  const DataLayout &DL,
3243                                                  AssumptionCache *AC,
3244                                                  const Instruction *CxtI,
3245                                                  const DominatorTree *DT) {
3246   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3247 }
3248 
3249 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // FIXME: This conservative implementation can be relaxed. E.g., most atomic
  // operations are guaranteed to terminate on most platforms, and most
  // functions terminate.
3253 
3254   return !I->isAtomic() &&       // atomics may never succeed on some platforms
3255          !isa<CallInst>(I) &&    // could throw and might not terminate
3256          !isa<InvokeInst>(I) &&  // might not terminate and could throw to
3257                                  //   non-successor (see bug 24185 for details).
3258          !isa<ResumeInst>(I) &&  // has no successors
3259          !isa<ReturnInst>(I);    // has no successors
3260 }
3261 
3262 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3263                                                   const Loop *L) {
3264   // The loop header is guaranteed to be executed for every iteration.
3265   //
3266   // FIXME: Relax this constraint to cover all basic blocks that are
3267   // guaranteed to be executed at every iteration.
3268   if (I->getParent() != L->getHeader()) return false;
3269 
3270   for (const Instruction &LI : *L->getHeader()) {
3271     if (&LI == I) return true;
3272     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3273   }
3274   llvm_unreachable("Instruction not contained in its own parent basic block.");
3275 }
3276 
3277 bool llvm::propagatesFullPoison(const Instruction *I) {
3278   switch (I->getOpcode()) {
3279     case Instruction::Add:
3280     case Instruction::Sub:
3281     case Instruction::Xor:
3282     case Instruction::Trunc:
3283     case Instruction::BitCast:
3284     case Instruction::AddrSpaceCast:
3285       // These operations all propagate poison unconditionally. Note that poison
3286       // is not any particular value, so xor or subtraction of poison with
3287       // itself still yields poison, not zero.
3288       return true;
3289 
3290     case Instruction::AShr:
3291     case Instruction::SExt:
3292       // For these operations, one bit of the input is replicated across
3293       // multiple output bits. A replicated poison bit is still poison.
3294       return true;
3295 
3296     case Instruction::Shl: {
3297       // Left shift *by* a poison value is poison. The number of
3298       // positions to shift is unsigned, so no negative values are
3299       // possible there. Left shift by zero places preserves poison. So
3300       // it only remains to consider left shift of poison by a positive
3301       // number of places.
3302       //
3303       // A left shift by a positive number of places leaves the lowest order bit
3304       // non-poisoned. However, if such a shift has a no-wrap flag, then we can
3305       // make the poison operand violate that flag, yielding a fresh full-poison
3306       // value.
3307       auto *OBO = cast<OverflowingBinaryOperator>(I);
3308       return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
3309     }
3310 
3311     case Instruction::Mul: {
3312       // A multiplication by zero yields a non-poison zero result, so we need to
3313       // rule out zero as an operand. Conservatively, multiplication by a
3314       // non-zero constant is not multiplication by zero.
3315       //
3316       // Multiplication by a non-zero constant can leave some bits
3317       // non-poisoned. For example, a multiplication by 2 leaves the lowest
3318       // order bit unpoisoned. So we need to consider that.
3319       //
3320       // Multiplication by 1 preserves poison. If the multiplication has a
3321       // no-wrap flag, then we can make the poison operand violate that flag
3322       // when multiplied by any integer other than 0 and 1.
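      // (Illustrative: 'mul nsw i32 %p, 2' with %p poison can be made to
      // violate nsw, so the result is treated as full poison, whereas
      // 'mul i32 %p, 0' would simply yield the non-poison value 0.)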
3323       auto *OBO = cast<OverflowingBinaryOperator>(I);
3324       if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
3325         for (Value *V : OBO->operands()) {
3326           if (auto *CI = dyn_cast<ConstantInt>(V)) {
3327             // A ConstantInt cannot yield poison, so we can assume that it is
3328             // the other operand that is poison.
3329             return !CI->isZero();
3330           }
3331         }
3332       }
3333       return false;
3334     }
3335 
3336     case Instruction::GetElementPtr:
3337       // A GEP implicitly represents a sequence of additions, subtractions,
3338       // truncations, sign extensions and multiplications. The multiplications
3339       // are by the non-zero sizes of some set of types, so we do not have to be
3340       // concerned with multiplication by zero. If the GEP is in-bounds, then
3341       // these operations are implicitly no-signed-wrap so poison is propagated
3342       // by the arguments above for Add, Sub, Trunc, SExt and Mul.
3343       return cast<GEPOperator>(I)->isInBounds();
3344 
3345     default:
3346       return false;
3347   }
3348 }
3349 
3350 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3351   switch (I->getOpcode()) {
3352     case Instruction::Store:
3353       return cast<StoreInst>(I)->getPointerOperand();
3354 
3355     case Instruction::Load:
3356       return cast<LoadInst>(I)->getPointerOperand();
3357 
3358     case Instruction::AtomicCmpXchg:
3359       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3360 
3361     case Instruction::AtomicRMW:
3362       return cast<AtomicRMWInst>(I)->getPointerOperand();
3363 
3364     case Instruction::UDiv:
3365     case Instruction::SDiv:
3366     case Instruction::URem:
3367     case Instruction::SRem:
3368       return I->getOperand(1);
3369 
3370     default:
3371       return nullptr;
3372   }
3373 }
3374 
3375 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
3376   // We currently only look for uses of poison values within the same basic
3377   // block, as that makes it easier to guarantee that the uses will be
3378   // executed given that PoisonI is executed.
3379   //
3380   // FIXME: Expand this to consider uses beyond the same basic block. To do
3381   // this, look out for the distinction between post-dominance and strong
3382   // post-dominance.
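  // (Illustrative: if PoisonI's result is, e.g., the divisor of an sdiv later
  // in the same block that is always reached, then PoisonI yielding full
  // poison would imply undefined behavior, so we may assume it does not.)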
3383   const BasicBlock *BB = PoisonI->getParent();
3384 
3385   // Set of instructions that we have proved will yield poison if PoisonI
3386   // does.
3387   SmallSet<const Value *, 16> YieldsPoison;
3388   YieldsPoison.insert(PoisonI);
3389 
3390   for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end();
3391        I != E; ++I) {
3392     if (&*I != PoisonI) {
3393       const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I);
3394       if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
3395       if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
3396         return false;
3397     }
3398 
3399     // Mark poison that propagates from I through uses of I.
3400     if (YieldsPoison.count(&*I)) {
3401       for (const User *User : I->users()) {
3402         const Instruction *UserI = cast<Instruction>(User);
3403         if (UserI->getParent() == BB && propagatesFullPoison(UserI))
3404           YieldsPoison.insert(User);
3405       }
3406     }
3407   }
3408   return false;
3409 }
3410 
3411 static bool isKnownNonNaN(Value *V, FastMathFlags FMF) {
3412   if (FMF.noNaNs())
3413     return true;
3414 
3415   if (auto *C = dyn_cast<ConstantFP>(V))
3416     return !C->isNaN();
3417   return false;
3418 }
3419 
3420 static bool isKnownNonZero(Value *V) {
3421   if (auto *C = dyn_cast<ConstantFP>(V))
3422     return !C->isZero();
3423   return false;
3424 }
3425 
3426 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
3427                                               FastMathFlags FMF,
3428                                               Value *CmpLHS, Value *CmpRHS,
3429                                               Value *TrueVal, Value *FalseVal,
3430                                               Value *&LHS, Value *&RHS) {
3431   LHS = CmpLHS;
3432   RHS = CmpRHS;
3433 
  // If the predicate is an "or-equal" (FP) predicate, then the select can give
  // inconsistent results for signed zeroes between implementations.
3436   //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
3437   //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
3438   // Therefore we behave conservatively and only proceed if at least one of the
3439   // operands is known to not be zero, or if we don't care about signed zeroes.
3440   switch (Pred) {
3441   default: break;
3442   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
3443   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
3444     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
3445         !isKnownNonZero(CmpRHS))
3446       return {SPF_UNKNOWN, SPNB_NA, false};
3447   }
3448 
3449   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
3450   bool Ordered = false;
3451 
3452   // When given one NaN and one non-NaN input:
3453   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
3454   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
3455   //     ordered comparison fails), which could be NaN or non-NaN.
3456   // so here we discover exactly what NaN behavior is required/accepted.
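  // (Illustrative: for 'fcmp olt %x, %y ? %x : %y' with %x known non-NaN, the
  // ordered compare is false when %y is NaN, so %y is returned and the
  // behavior is SPNB_RETURNS_NAN; the same operands with 'fcmp ult' would
  // return %x instead, i.e. SPNB_RETURNS_OTHER.)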
3457   if (CmpInst::isFPPredicate(Pred)) {
3458     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
3459     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
3460 
3461     if (LHSSafe && RHSSafe) {
3462       // Both operands are known non-NaN.
3463       NaNBehavior = SPNB_RETURNS_ANY;
3464     } else if (CmpInst::isOrdered(Pred)) {
3465       // An ordered comparison will return false when given a NaN, so it
3466       // returns the RHS.
3467       Ordered = true;
3468       if (LHSSafe)
3469         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
3470         NaNBehavior = SPNB_RETURNS_NAN;
3471       else if (RHSSafe)
3472         NaNBehavior = SPNB_RETURNS_OTHER;
3473       else
3474         // Completely unsafe.
3475         return {SPF_UNKNOWN, SPNB_NA, false};
3476     } else {
3477       Ordered = false;
3478       // An unordered comparison will return true when given a NaN, so it
3479       // returns the LHS.
3480       if (LHSSafe)
3481         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
3482         NaNBehavior = SPNB_RETURNS_OTHER;
3483       else if (RHSSafe)
3484         NaNBehavior = SPNB_RETURNS_NAN;
3485       else
3486         // Completely unsafe.
3487         return {SPF_UNKNOWN, SPNB_NA, false};
3488     }
3489   }
3490 
3491   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
3492     std::swap(CmpLHS, CmpRHS);
3493     Pred = CmpInst::getSwappedPredicate(Pred);
3494     if (NaNBehavior == SPNB_RETURNS_NAN)
3495       NaNBehavior = SPNB_RETURNS_OTHER;
3496     else if (NaNBehavior == SPNB_RETURNS_OTHER)
3497       NaNBehavior = SPNB_RETURNS_NAN;
3498     Ordered = !Ordered;
3499   }
3500 
3501   // ([if]cmp X, Y) ? X : Y
3502   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
3503     switch (Pred) {
3504     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
3505     case ICmpInst::ICMP_UGT:
3506     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
3507     case ICmpInst::ICMP_SGT:
3508     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
3509     case ICmpInst::ICMP_ULT:
3510     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
3511     case ICmpInst::ICMP_SLT:
3512     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
3513     case FCmpInst::FCMP_UGT:
3514     case FCmpInst::FCMP_UGE:
3515     case FCmpInst::FCMP_OGT:
3516     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
3517     case FCmpInst::FCMP_ULT:
3518     case FCmpInst::FCMP_ULE:
3519     case FCmpInst::FCMP_OLT:
3520     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
3521     }
3522   }
3523 
3524   if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
3525     if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
3526         (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
3527 
3528       // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
3529       // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
3530       if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
3531         return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3532       }
3533 
3534       // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
3535       // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
3536       if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
3537         return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3538       }
3539     }
3540 
    // (Y >s C) ? ~Y : ~C  ==  (~Y <s ~C) ? ~Y : ~C  ==  SMIN(~Y, ~C)
3542     if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) {
3543       if (C1->getType() == C2->getType() && ~C1->getValue() == C2->getValue() &&
3544           (match(TrueVal, m_Not(m_Specific(CmpLHS))) ||
3545            match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
3546         LHS = TrueVal;
3547         RHS = FalseVal;
3548         return {SPF_SMIN, SPNB_NA, false};
3549       }
3550     }
3551   }
3552 
3553   // TODO: (X > 4) ? X : 5   -->  (X >= 5) ? X : 5  -->  MAX(X, 5)
3554 
3555   return {SPF_UNKNOWN, SPNB_NA, false};
3556 }
3557 
3558 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
3559                               Instruction::CastOps *CastOp) {
3560   CastInst *CI = dyn_cast<CastInst>(V1);
3561   Constant *C = dyn_cast<Constant>(V2);
3562   CastInst *CI2 = dyn_cast<CastInst>(V2);
3563   if (!CI)
3564     return nullptr;
3565   *CastOp = CI->getOpcode();
3566 
3567   if (CI2) {
    // If V1 and V2 are the same kind of cast from the same source type, we can
    // look through both casts and compare the underlying operands.
3570     if (CI2->getOpcode() == CI->getOpcode() &&
3571         CI2->getSrcTy() == CI->getSrcTy())
3572       return CI2->getOperand(0);
3573     return nullptr;
3574   } else if (!C) {
3575     return nullptr;
3576   }
3577 
3578   if (isa<SExtInst>(CI) && CmpI->isSigned()) {
3579     Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy());
3580     // This is only valid if the truncated value can be sign-extended
3581     // back to the original value.
3582     if (ConstantExpr::getSExt(T, C->getType()) == C)
3583       return T;
3584     return nullptr;
3585   }
3586   if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
3587     return ConstantExpr::getTrunc(C, CI->getSrcTy());
3588 
3589   if (isa<TruncInst>(CI))
3590     return ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
3591 
3592   if (isa<FPToUIInst>(CI))
3593     return ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);
3594 
3595   if (isa<FPToSIInst>(CI))
3596     return ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);
3597 
3598   if (isa<UIToFPInst>(CI))
3599     return ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);
3600 
3601   if (isa<SIToFPInst>(CI))
3602     return ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);
3603 
3604   if (isa<FPTruncInst>(CI))
3605     return ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);
3606 
3607   if (isa<FPExtInst>(CI))
3608     return ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);
3609 
3610   return nullptr;
3611 }
3612 
3613 SelectPatternResult llvm::matchSelectPattern(Value *V,
3614                                              Value *&LHS, Value *&RHS,
3615                                              Instruction::CastOps *CastOp) {
3616   SelectInst *SI = dyn_cast<SelectInst>(V);
3617   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
3618 
3619   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
3620   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
3621 
3622   CmpInst::Predicate Pred = CmpI->getPredicate();
3623   Value *CmpLHS = CmpI->getOperand(0);
3624   Value *CmpRHS = CmpI->getOperand(1);
3625   Value *TrueVal = SI->getTrueValue();
3626   Value *FalseVal = SI->getFalseValue();
3627   FastMathFlags FMF;
3628   if (isa<FPMathOperator>(CmpI))
3629     FMF = CmpI->getFastMathFlags();
3630 
3631   // Bail out early.
3632   if (CmpI->isEquality())
3633     return {SPF_UNKNOWN, SPNB_NA, false};
3634 
3635   // Deal with type mismatches.
3636   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
3637     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
3638       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3639                                   cast<CastInst>(TrueVal)->getOperand(0), C,
3640                                   LHS, RHS);
3641     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
3642       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3643                                   C, cast<CastInst>(FalseVal)->getOperand(0),
3644                                   LHS, RHS);
3645   }
3646   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
3647                               LHS, RHS);
3648 }
3649 
3650 ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) {
3651   const unsigned NumRanges = Ranges.getNumOperands() / 2;
3652   assert(NumRanges >= 1 && "Must have at least one range!");
3653   assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
3654 
3655   auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0));
3656   auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1));
3657 
3658   ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue());
3659 
3660   for (unsigned i = 1; i < NumRanges; ++i) {
3661     auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
3662     auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
3663 
3664     // Note: unionWith will potentially create a range that contains values not
3665     // contained in any of the original N ranges.
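    // (Illustrative: unioning [0, 2) with [8, 10) yields a single range such
    // as [0, 10), which also covers 2..7.)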
3666     CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue()));
3667   }
3668 
3669   return CR;
3670 }
3671 
3672 /// Return true if "icmp Pred LHS RHS" is always true.
3673 static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
3674                             const DataLayout &DL, unsigned Depth,
3675                             AssumptionCache *AC, const Instruction *CxtI,
3676                             const DominatorTree *DT) {
3677   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
3678   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
3679     return true;
3680 
3681   switch (Pred) {
3682   default:
3683     return false;
3684 
3685   case CmpInst::ICMP_SLE: {
3686     const APInt *C;
3687 
3688     // LHS s<= LHS +_{nsw} C   if C >= 0
3689     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
3690       return !C->isNegative();
3691     return false;
3692   }
3693 
3694   case CmpInst::ICMP_ULE: {
3695     const APInt *C;
3696 
3697     // LHS u<= LHS +_{nuw} C   for any C
3698     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
3699       return true;
3700 
3701     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
3702     auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X,
3703                                        const APInt *&CA, const APInt *&CB) {
3704       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
3705           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
3706         return true;
3707 
3708       // If X & C == 0 then (X | C) == X +_{nuw} C
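      // (Illustrative: X = 0b1000, C = 0b0011: X & C == 0, so
      //  X | C == 0b1011 == X + 3 with no unsigned wrap.)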
3709       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
3710           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
3711         unsigned BitWidth = CA->getBitWidth();
3712         APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3713         computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
3714 
3715         if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
3716           return true;
3717       }
3718 
3719       return false;
3720     };
3721 
3722     Value *X;
3723     const APInt *CLHS, *CRHS;
3724     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
3725       return CLHS->ule(*CRHS);
3726 
3727     return false;
3728   }
3729   }
3730 }
3731 
3732 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
3733 /// ALHS ARHS" is true.
3734 static bool isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS,
3735                                   Value *ARHS, Value *BLHS, Value *BRHS,
3736                                   const DataLayout &DL, unsigned Depth,
3737                                   AssumptionCache *AC, const Instruction *CxtI,
3738                                   const DominatorTree *DT) {
3739   switch (Pred) {
3740   default:
3741     return false;
3742 
3743   case CmpInst::ICMP_SLT:
3744   case CmpInst::ICMP_SLE:
3745     return isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
3746                            DT) &&
3747            isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI,
3748                            DT);
3749 
3750   case CmpInst::ICMP_ULT:
3751   case CmpInst::ICMP_ULE:
3752     return isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
3753                            DT) &&
3754            isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI,
3755                            DT);
3756   }
3757 }
3758 
3759 bool llvm::isImpliedCondition(Value *LHS, Value *RHS, const DataLayout &DL,
3760                               unsigned Depth, AssumptionCache *AC,
3761                               const Instruction *CxtI,
3762                               const DominatorTree *DT) {
3763   assert(LHS->getType() == RHS->getType() && "mismatched type");
3764   Type *OpTy = LHS->getType();
3765   assert(OpTy->getScalarType()->isIntegerTy(1));
3766 
3767   // LHS ==> RHS by definition
3768   if (LHS == RHS) return true;
3769 
3770   if (OpTy->isVectorTy())
    // TODO: extend the code below to handle vectors.
3772     return false;
3773   assert(OpTy->isIntegerTy(1) && "implied by above");
3774 
3775   ICmpInst::Predicate APred, BPred;
3776   Value *ALHS, *ARHS;
3777   Value *BLHS, *BRHS;
3778 
3779   if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
3780       !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
3781     return false;
3782 
3783   if (APred == BPred)
3784     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
3785                                  CxtI, DT);
3786 
3787   return false;
3788 }
3789