1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/MemoryBuiltins.h"
21 #include "llvm/Analysis/Loads.h"
22 #include "llvm/Analysis/LoopInfo.h"
23 #include "llvm/Analysis/VectorUtils.h"
24 #include "llvm/IR/CallSite.h"
25 #include "llvm/IR/ConstantRange.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/Dominators.h"
29 #include "llvm/IR/GetElementPtrTypeIterator.h"
30 #include "llvm/IR/GlobalAlias.h"
31 #include "llvm/IR/GlobalVariable.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/LLVMContext.h"
35 #include "llvm/IR/Metadata.h"
36 #include "llvm/IR/Operator.h"
37 #include "llvm/IR/PatternMatch.h"
38 #include "llvm/IR/Statepoint.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/MathExtras.h"
41 #include <algorithm>
42 #include <array>
43 #include <cstring>
44 using namespace llvm;
45 using namespace llvm::PatternMatch;
46 
47 const unsigned MaxDepth = 6;
48 
49 // Controls the number of uses of the value searched for possible
50 // dominating comparisons.
51 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
52                                               cl::Hidden, cl::init(20));
53 
54 /// Returns the bitwidth of the given scalar or pointer type (if unknown returns
55 /// 0). For vector types, returns the element type's bitwidth.
56 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
57   if (unsigned BitWidth = Ty->getScalarSizeInBits())
58     return BitWidth;
59 
60   return DL.getPointerTypeSizeInBits(Ty);
61 }
62 
63 namespace {
64 // Simplifying using an assume can only be done in a particular control-flow
65 // context (the context instruction provides that context). If an assume and
66 // the context instruction are not in the same block then the DT helps in
67 // figuring out if we can use it.
68 struct Query {
69   const DataLayout &DL;
70   AssumptionCache *AC;
71   const Instruction *CxtI;
72   const DominatorTree *DT;
73 
74   /// Set of assumptions that should be excluded from further queries.
75   /// This is because of the potential for mutual recursion to cause
76   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
77   /// classic case of this is assume(x = y), which will attempt to determine
78   /// bits in x from bits in y, which will attempt to determine bits in y from
79   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
80   /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
81   /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
82   /// on.
83   std::array<const Value*, MaxDepth> Excluded;
84   unsigned NumExcluded;
85 
86   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
87         const DominatorTree *DT)
88       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}
89 
90   Query(const Query &Q, const Value *NewExcl)
91       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
92     Excluded = Q.Excluded;
93     Excluded[NumExcluded++] = NewExcl;
94     assert(NumExcluded <= Excluded.size());
95   }
96 
97   bool isExcluded(const Value *Value) const {
98     if (NumExcluded == 0)
99       return false;
100     auto End = Excluded.begin() + NumExcluded;
101     return std::find(Excluded.begin(), End, Value) != End;
102   }
103 };
104 } // end anonymous namespace
105 
106 // Given the provided Value and, potentially, a context instruction, return
107 // the preferred context instruction (if any).
108 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
109   // If we've been provided with a context instruction, then use that (provided
110   // it has been inserted).
111   if (CxtI && CxtI->getParent())
112     return CxtI;
113 
114   // If the value is really an already-inserted instruction, then use that.
115   CxtI = dyn_cast<Instruction>(V);
116   if (CxtI && CxtI->getParent())
117     return CxtI;
118 
119   return nullptr;
120 }
121 
122 static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
123                              unsigned Depth, const Query &Q);
124 
125 void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
126                             const DataLayout &DL, unsigned Depth,
127                             AssumptionCache *AC, const Instruction *CxtI,
128                             const DominatorTree *DT) {
129   ::computeKnownBits(V, KnownZero, KnownOne, Depth,
130                      Query(DL, AC, safeCxtI(V, CxtI), DT));
131 }
132 
133 bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
134                                AssumptionCache *AC, const Instruction *CxtI,
135                                const DominatorTree *DT) {
136   assert(LHS->getType() == RHS->getType() &&
137          "LHS and RHS should have the same type");
138   assert(LHS->getType()->isIntOrIntVectorTy() &&
139          "LHS and RHS should be integers");
140   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
141   APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
142   APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
143   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
144   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
145   return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
146 }
147 
148 static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
149                            unsigned Depth, const Query &Q);
150 
151 void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
152                           const DataLayout &DL, unsigned Depth,
153                           AssumptionCache *AC, const Instruction *CxtI,
154                           const DominatorTree *DT) {
155   ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
156                    Query(DL, AC, safeCxtI(V, CxtI), DT));
157 }
158 
159 static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
160                                    const Query &Q);
161 
162 bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
163                                   unsigned Depth, AssumptionCache *AC,
164                                   const Instruction *CxtI,
165                                   const DominatorTree *DT) {
166   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
167                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
168 }
169 
170 static bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q);
171 
172 bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
173                           AssumptionCache *AC, const Instruction *CxtI,
174                           const DominatorTree *DT) {
175   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
176 }
177 
178 bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
179                               AssumptionCache *AC, const Instruction *CxtI,
180                               const DominatorTree *DT) {
181   bool NonNegative, Negative;
182   ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
183   return NonNegative;
184 }
185 
186 bool llvm::isKnownPositive(Value *V, const DataLayout &DL, unsigned Depth,
187                            AssumptionCache *AC, const Instruction *CxtI,
188                            const DominatorTree *DT) {
189   if (auto *CI = dyn_cast<ConstantInt>(V))
190     return CI->getValue().isStrictlyPositive();
191 
192   // TODO: We'd doing two recursive queries here.  We should factor this such
193   // that only a single query is needed.
194   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
195     isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
196 }
197 
198 bool llvm::isKnownNegative(Value *V, const DataLayout &DL, unsigned Depth,
199                            AssumptionCache *AC, const Instruction *CxtI,
200                            const DominatorTree *DT) {
201   bool NonNegative, Negative;
202   ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
203   return Negative;
204 }
205 
206 static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q);
207 
208 bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
209                           AssumptionCache *AC, const Instruction *CxtI,
210                           const DominatorTree *DT) {
211   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
212                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
213                                          DT));
214 }
215 
216 static bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
217                               const Query &Q);
218 
219 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
220                              unsigned Depth, AssumptionCache *AC,
221                              const Instruction *CxtI, const DominatorTree *DT) {
222   return ::MaskedValueIsZero(V, Mask, Depth,
223                              Query(DL, AC, safeCxtI(V, CxtI), DT));
224 }
225 
226 static unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q);
227 
228 unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
229                                   unsigned Depth, AssumptionCache *AC,
230                                   const Instruction *CxtI,
231                                   const DominatorTree *DT) {
232   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
233 }
234 
235 static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
236                                    APInt &KnownZero, APInt &KnownOne,
237                                    APInt &KnownZero2, APInt &KnownOne2,
238                                    unsigned Depth, const Query &Q) {
239   if (!Add) {
240     if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
241       // We know that the top bits of C-X are clear if X contains less bits
242       // than C (i.e. no wrap-around can happen).  For example, 20-X is
243       // positive if we can prove that X is >= 0 and < 16.
244       if (!CLHS->getValue().isNegative()) {
245         unsigned BitWidth = KnownZero.getBitWidth();
246         unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
247         // NLZ can't be BitWidth with no sign bit
248         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
249         computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
250 
251         // If all of the MaskV bits are known to be zero, then we know the
252         // output top bits are zero, because we now know that the output is
253         // from [0-C].
254         if ((KnownZero2 & MaskV) == MaskV) {
255           unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
256           // Top bits known zero.
257           KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
258         }
259       }
260     }
261   }
262 
263   unsigned BitWidth = KnownZero.getBitWidth();
264 
265   // If an initial sequence of bits in the result is not needed, the
266   // corresponding bits in the operands are not needed.
267   APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
268   computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
269   computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
270 
271   // Carry in a 1 for a subtract, rather than a 0.
272   APInt CarryIn(BitWidth, 0);
273   if (!Add) {
274     // Sum = LHS + ~RHS + 1
275     std::swap(KnownZero2, KnownOne2);
276     CarryIn.setBit(0);
277   }
278 
279   APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
280   APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
281 
282   // Compute known bits of the carry.
283   APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
284   APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;
285 
286   // Compute set of known bits (where all three relevant bits are known).
287   APInt LHSKnown = LHSKnownZero | LHSKnownOne;
288   APInt RHSKnown = KnownZero2 | KnownOne2;
289   APInt CarryKnown = CarryKnownZero | CarryKnownOne;
290   APInt Known = LHSKnown & RHSKnown & CarryKnown;
291 
292   assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
293          "known bits of sum differ");
294 
295   // Compute known bits of the result.
296   KnownZero = ~PossibleSumOne & Known;
297   KnownOne = PossibleSumOne & Known;
298 
299   // Are we still trying to solve for the sign bit?
300   if (!Known.isNegative()) {
301     if (NSW) {
302       // Adding two non-negative numbers, or subtracting a negative number from
303       // a non-negative one, can't wrap into negative.
304       if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
305         KnownZero |= APInt::getSignBit(BitWidth);
306       // Adding two negative numbers, or subtracting a non-negative number from
307       // a negative one, can't wrap into non-negative.
308       else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
309         KnownOne |= APInt::getSignBit(BitWidth);
310     }
311   }
312 }
313 
314 static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
315                                 APInt &KnownZero, APInt &KnownOne,
316                                 APInt &KnownZero2, APInt &KnownOne2,
317                                 unsigned Depth, const Query &Q) {
318   unsigned BitWidth = KnownZero.getBitWidth();
319   computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
320   computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);
321 
322   bool isKnownNegative = false;
323   bool isKnownNonNegative = false;
324   // If the multiplication is known not to overflow, compute the sign bit.
325   if (NSW) {
326     if (Op0 == Op1) {
327       // The product of a number with itself is non-negative.
328       isKnownNonNegative = true;
329     } else {
330       bool isKnownNonNegativeOp1 = KnownZero.isNegative();
331       bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
332       bool isKnownNegativeOp1 = KnownOne.isNegative();
333       bool isKnownNegativeOp0 = KnownOne2.isNegative();
334       // The product of two numbers with the same sign is non-negative.
335       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
336         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
337       // The product of a negative number and a non-negative number is either
338       // negative or zero.
339       if (!isKnownNonNegative)
340         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
341                            isKnownNonZero(Op0, Depth, Q)) ||
342                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
343                            isKnownNonZero(Op1, Depth, Q));
344     }
345   }
346 
347   // If low bits are zero in either operand, output low known-0 bits.
348   // Also compute a conservative estimate for high known-0 bits.
349   // More trickiness is possible, but this is sufficient for the
350   // interesting case of alignment computation.
351   KnownOne.clearAllBits();
352   unsigned TrailZ = KnownZero.countTrailingOnes() +
353                     KnownZero2.countTrailingOnes();
354   unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
355                              KnownZero2.countLeadingOnes(),
356                              BitWidth) - BitWidth;
357 
358   TrailZ = std::min(TrailZ, BitWidth);
359   LeadZ = std::min(LeadZ, BitWidth);
360   KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
361               APInt::getHighBitsSet(BitWidth, LeadZ);
362 
363   // Only make use of no-wrap flags if we failed to compute the sign bit
364   // directly.  This matters if the multiplication always overflows, in
365   // which case we prefer to follow the result of the direct computation,
366   // though as the program is invoking undefined behaviour we can choose
367   // whatever we like here.
368   if (isKnownNonNegative && !KnownOne.isNegative())
369     KnownZero.setBit(BitWidth - 1);
370   else if (isKnownNegative && !KnownZero.isNegative())
371     KnownOne.setBit(BitWidth - 1);
372 }
373 
374 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
375                                              APInt &KnownZero,
376                                              APInt &KnownOne) {
377   unsigned BitWidth = KnownZero.getBitWidth();
378   unsigned NumRanges = Ranges.getNumOperands() / 2;
379   assert(NumRanges >= 1);
380 
381   KnownZero.setAllBits();
382   KnownOne.setAllBits();
383 
384   for (unsigned i = 0; i < NumRanges; ++i) {
385     ConstantInt *Lower =
386         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
387     ConstantInt *Upper =
388         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
389     ConstantRange Range(Lower->getValue(), Upper->getValue());
390 
391     // The first CommonPrefixBits of all values in Range are equal.
392     unsigned CommonPrefixBits =
393         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
394 
395     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
396     KnownOne &= Range.getUnsignedMax() & Mask;
397     KnownZero &= ~Range.getUnsignedMax() & Mask;
398   }
399 }
400 
401 static bool isEphemeralValueOf(Instruction *I, const Value *E) {
402   SmallVector<const Value *, 16> WorkSet(1, I);
403   SmallPtrSet<const Value *, 32> Visited;
404   SmallPtrSet<const Value *, 16> EphValues;
405 
406   // The instruction defining an assumption's condition itself is always
407   // considered ephemeral to that assumption (even if it has other
408   // non-ephemeral users). See r246696's test case for an example.
409   if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
410     return true;
411 
412   while (!WorkSet.empty()) {
413     const Value *V = WorkSet.pop_back_val();
414     if (!Visited.insert(V).second)
415       continue;
416 
417     // If all uses of this value are ephemeral, then so is this value.
418     if (std::all_of(V->user_begin(), V->user_end(),
419                     [&](const User *U) { return EphValues.count(U); })) {
420       if (V == E)
421         return true;
422 
423       EphValues.insert(V);
424       if (const User *U = dyn_cast<User>(V))
425         for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
426              J != JE; ++J) {
427           if (isSafeToSpeculativelyExecute(*J))
428             WorkSet.push_back(*J);
429         }
430     }
431   }
432 
433   return false;
434 }
435 
436 // Is this an intrinsic that cannot be speculated but also cannot trap?
437 static bool isAssumeLikeIntrinsic(const Instruction *I) {
438   if (const CallInst *CI = dyn_cast<CallInst>(I))
439     if (Function *F = CI->getCalledFunction())
440       switch (F->getIntrinsicID()) {
441       default: break;
442       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
443       case Intrinsic::assume:
444       case Intrinsic::dbg_declare:
445       case Intrinsic::dbg_value:
446       case Intrinsic::invariant_start:
447       case Intrinsic::invariant_end:
448       case Intrinsic::lifetime_start:
449       case Intrinsic::lifetime_end:
450       case Intrinsic::objectsize:
451       case Intrinsic::ptr_annotation:
452       case Intrinsic::var_annotation:
453         return true;
454       }
455 
456   return false;
457 }
458 
459 static bool isValidAssumeForContext(Value *V, const Instruction *CxtI,
460                                     const DominatorTree *DT) {
461   Instruction *Inv = cast<Instruction>(V);
462 
463   // There are two restrictions on the use of an assume:
464   //  1. The assume must dominate the context (or the control flow must
465   //     reach the assume whenever it reaches the context).
466   //  2. The context must not be in the assume's set of ephemeral values
467   //     (otherwise we will use the assume to prove that the condition
468   //     feeding the assume is trivially true, thus causing the removal of
469   //     the assume).
470 
471   if (DT) {
472     if (DT->dominates(Inv, CxtI)) {
473       return true;
474     } else if (Inv->getParent() == CxtI->getParent()) {
475       // The context comes first, but they're both in the same block. Make sure
476       // there is nothing in between that might interrupt the control flow.
477       for (BasicBlock::const_iterator I =
478              std::next(BasicBlock::const_iterator(CxtI)),
479                                       IE(Inv); I != IE; ++I)
480         if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
481           return false;
482 
483       return !isEphemeralValueOf(Inv, CxtI);
484     }
485 
486     return false;
487   }
488 
489   // When we don't have a DT, we do a limited search...
490   if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
491     return true;
492   } else if (Inv->getParent() == CxtI->getParent()) {
493     // Search forward from the assume until we reach the context (or the end
494     // of the block); the common case is that the assume will come first.
495     for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
496          IE = Inv->getParent()->end(); I != IE; ++I)
497       if (&*I == CxtI)
498         return true;
499 
500     // The context must come first...
501     for (BasicBlock::const_iterator I =
502            std::next(BasicBlock::const_iterator(CxtI)),
503                                     IE(Inv); I != IE; ++I)
504       if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
505         return false;
506 
507     return !isEphemeralValueOf(Inv, CxtI);
508   }
509 
510   return false;
511 }
512 
513 bool llvm::isValidAssumeForContext(const Instruction *I,
514                                    const Instruction *CxtI,
515                                    const DominatorTree *DT) {
516   return ::isValidAssumeForContext(const_cast<Instruction *>(I), CxtI, DT);
517 }
518 
519 static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
520                                        APInt &KnownOne, unsigned Depth,
521                                        const Query &Q) {
522   // Use of assumptions is context-sensitive. If we don't have a context, we
523   // cannot use them!
524   if (!Q.AC || !Q.CxtI)
525     return;
526 
527   unsigned BitWidth = KnownZero.getBitWidth();
528 
529   for (auto &AssumeVH : Q.AC->assumptions()) {
530     if (!AssumeVH)
531       continue;
532     CallInst *I = cast<CallInst>(AssumeVH);
533     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
534            "Got assumption for the wrong function!");
535     if (Q.isExcluded(I))
536       continue;
537 
538     // Warning: This loop can end up being somewhat performance sensetive.
539     // We're running this loop for once for each value queried resulting in a
540     // runtime of ~O(#assumes * #values).
541 
542     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
543            "must be an assume intrinsic");
544 
545     Value *Arg = I->getArgOperand(0);
546 
547     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
548       assert(BitWidth == 1 && "assume operand is not i1?");
549       KnownZero.clearAllBits();
550       KnownOne.setAllBits();
551       return;
552     }
553 
554     // The remaining tests are all recursive, so bail out if we hit the limit.
555     if (Depth == MaxDepth)
556       continue;
557 
558     Value *A, *B;
559     auto m_V = m_CombineOr(m_Specific(V),
560                            m_CombineOr(m_PtrToInt(m_Specific(V)),
561                            m_BitCast(m_Specific(V))));
562 
563     CmpInst::Predicate Pred;
564     ConstantInt *C;
565     // assume(v = a)
566     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
567         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
568       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
569       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
570       KnownZero |= RHSKnownZero;
571       KnownOne  |= RHSKnownOne;
572     // assume(v & b = a)
573     } else if (match(Arg,
574                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
575                Pred == ICmpInst::ICMP_EQ &&
576                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
577       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
578       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
579       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
580       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
581 
582       // For those bits in the mask that are known to be one, we can propagate
583       // known bits from the RHS to V.
584       KnownZero |= RHSKnownZero & MaskKnownOne;
585       KnownOne  |= RHSKnownOne  & MaskKnownOne;
586     // assume(~(v & b) = a)
587     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
588                                    m_Value(A))) &&
589                Pred == ICmpInst::ICMP_EQ &&
590                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
591       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
592       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
593       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
594       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
595 
596       // For those bits in the mask that are known to be one, we can propagate
597       // inverted known bits from the RHS to V.
598       KnownZero |= RHSKnownOne  & MaskKnownOne;
599       KnownOne  |= RHSKnownZero & MaskKnownOne;
600     // assume(v | b = a)
601     } else if (match(Arg,
602                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
603                Pred == ICmpInst::ICMP_EQ &&
604                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
605       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
606       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
607       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
608       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
609 
610       // For those bits in B that are known to be zero, we can propagate known
611       // bits from the RHS to V.
612       KnownZero |= RHSKnownZero & BKnownZero;
613       KnownOne  |= RHSKnownOne  & BKnownZero;
614     // assume(~(v | b) = a)
615     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
616                                    m_Value(A))) &&
617                Pred == ICmpInst::ICMP_EQ &&
618                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
619       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
620       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
621       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
622       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
623 
624       // For those bits in B that are known to be zero, we can propagate
625       // inverted known bits from the RHS to V.
626       KnownZero |= RHSKnownOne  & BKnownZero;
627       KnownOne  |= RHSKnownZero & BKnownZero;
628     // assume(v ^ b = a)
629     } else if (match(Arg,
630                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
631                Pred == ICmpInst::ICMP_EQ &&
632                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
633       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
634       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
635       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
636       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
637 
638       // For those bits in B that are known to be zero, we can propagate known
639       // bits from the RHS to V. For those bits in B that are known to be one,
640       // we can propagate inverted known bits from the RHS to V.
641       KnownZero |= RHSKnownZero & BKnownZero;
642       KnownOne  |= RHSKnownOne  & BKnownZero;
643       KnownZero |= RHSKnownOne  & BKnownOne;
644       KnownOne  |= RHSKnownZero & BKnownOne;
645     // assume(~(v ^ b) = a)
646     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
647                                    m_Value(A))) &&
648                Pred == ICmpInst::ICMP_EQ &&
649                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
650       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
651       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
652       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
653       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
654 
655       // For those bits in B that are known to be zero, we can propagate
656       // inverted known bits from the RHS to V. For those bits in B that are
657       // known to be one, we can propagate known bits from the RHS to V.
658       KnownZero |= RHSKnownOne  & BKnownZero;
659       KnownOne  |= RHSKnownZero & BKnownZero;
660       KnownZero |= RHSKnownZero & BKnownOne;
661       KnownOne  |= RHSKnownOne  & BKnownOne;
662     // assume(v << c = a)
663     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
664                                    m_Value(A))) &&
665                Pred == ICmpInst::ICMP_EQ &&
666                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
667       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
668       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
669       // For those bits in RHS that are known, we can propagate them to known
670       // bits in V shifted to the right by C.
671       KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
672       KnownOne  |= RHSKnownOne.lshr(C->getZExtValue());
673     // assume(~(v << c) = a)
674     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
675                                    m_Value(A))) &&
676                Pred == ICmpInst::ICMP_EQ &&
677                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
678       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
679       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
680       // For those bits in RHS that are known, we can propagate them inverted
681       // to known bits in V shifted to the right by C.
682       KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
683       KnownOne  |= RHSKnownZero.lshr(C->getZExtValue());
684     // assume(v >> c = a)
685     } else if (match(Arg,
686                      m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
687                                                 m_AShr(m_V, m_ConstantInt(C))),
688                               m_Value(A))) &&
689                Pred == ICmpInst::ICMP_EQ &&
690                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
691       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
692       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
693       // For those bits in RHS that are known, we can propagate them to known
694       // bits in V shifted to the right by C.
695       KnownZero |= RHSKnownZero << C->getZExtValue();
696       KnownOne  |= RHSKnownOne  << C->getZExtValue();
697     // assume(~(v >> c) = a)
698     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
699                                              m_LShr(m_V, m_ConstantInt(C)),
700                                              m_AShr(m_V, m_ConstantInt(C)))),
701                                    m_Value(A))) &&
702                Pred == ICmpInst::ICMP_EQ &&
703                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
704       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
705       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
706       // For those bits in RHS that are known, we can propagate them inverted
707       // to known bits in V shifted to the right by C.
708       KnownZero |= RHSKnownOne  << C->getZExtValue();
709       KnownOne  |= RHSKnownZero << C->getZExtValue();
710     // assume(v >=_s c) where c is non-negative
711     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
712                Pred == ICmpInst::ICMP_SGE &&
713                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
714       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
715       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
716 
717       if (RHSKnownZero.isNegative()) {
718         // We know that the sign bit is zero.
719         KnownZero |= APInt::getSignBit(BitWidth);
720       }
721     // assume(v >_s c) where c is at least -1.
722     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
723                Pred == ICmpInst::ICMP_SGT &&
724                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
725       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
726       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
727 
728       if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
729         // We know that the sign bit is zero.
730         KnownZero |= APInt::getSignBit(BitWidth);
731       }
732     // assume(v <=_s c) where c is negative
733     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
734                Pred == ICmpInst::ICMP_SLE &&
735                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
736       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
737       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
738 
739       if (RHSKnownOne.isNegative()) {
740         // We know that the sign bit is one.
741         KnownOne |= APInt::getSignBit(BitWidth);
742       }
743     // assume(v <_s c) where c is non-positive
744     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
745                Pred == ICmpInst::ICMP_SLT &&
746                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
747       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
748       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
749 
750       if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
751         // We know that the sign bit is one.
752         KnownOne |= APInt::getSignBit(BitWidth);
753       }
754     // assume(v <=_u c)
755     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
756                Pred == ICmpInst::ICMP_ULE &&
757                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
758       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
759       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
760 
761       // Whatever high bits in c are zero are known to be zero.
762       KnownZero |=
763         APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
764     // assume(v <_u c)
765     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
766                Pred == ICmpInst::ICMP_ULT &&
767                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
768       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
769       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
770 
771       // Whatever high bits in c are zero are known to be zero (if c is a power
772       // of 2, then one more).
773       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
774         KnownZero |=
775           APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
776       else
777         KnownZero |=
778           APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
779     }
780   }
781 }
782 
783 // Compute known bits from a shift operator, including those with a
784 // non-constant shift amount. KnownZero and KnownOne are the outputs of this
785 // function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
786 // same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
787 // functors that, given the known-zero or known-one bits respectively, and a
788 // shift amount, compute the implied known-zero or known-one bits of the shift
789 // operator's result respectively for that shift amount. The results from calling
790 // KZF and KOF are conservatively combined for all permitted shift amounts.
791 template <typename KZFunctor, typename KOFunctor>
792 static void computeKnownBitsFromShiftOperator(Operator *I,
793               APInt &KnownZero, APInt &KnownOne,
794               APInt &KnownZero2, APInt &KnownOne2,
795               unsigned Depth, const Query &Q, KZFunctor KZF, KOFunctor KOF) {
796   unsigned BitWidth = KnownZero.getBitWidth();
797 
798   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
799     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
800 
801     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
802     KnownZero = KZF(KnownZero, ShiftAmt);
803     KnownOne  = KOF(KnownOne, ShiftAmt);
804     return;
805   }
806 
807   computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
808 
809   // Note: We cannot use KnownZero.getLimitedValue() here, because if
810   // BitWidth > 64 and any upper bits are known, we'll end up returning the
811   // limit value (which implies all bits are known).
812   uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
813   uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();
814 
815   // It would be more-clearly correct to use the two temporaries for this
816   // calculation. Reusing the APInts here to prevent unnecessary allocations.
817   KnownZero.clearAllBits();
818   KnownOne.clearAllBits();
819 
820   // If we know the shifter operand is nonzero, we can sometimes infer more
821   // known bits. However this is expensive to compute, so be lazy about it and
822   // only compute it when absolutely necessary.
823   Optional<bool> ShifterOperandIsNonZero;
824 
825   // Early exit if we can't constrain any well-defined shift amount.
826   if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
827     ShifterOperandIsNonZero =
828         isKnownNonZero(I->getOperand(1), Depth + 1, Q);
829     if (!*ShifterOperandIsNonZero)
830       return;
831   }
832 
833   computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
834 
835   KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
836   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
837     // Combine the shifted known input bits only for those shift amounts
838     // compatible with its known constraints.
839     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
840       continue;
841     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
842       continue;
843     // If we know the shifter is nonzero, we may be able to infer more known
844     // bits. This check is sunk down as far as possible to avoid the expensive
845     // call to isKnownNonZero if the cheaper checks above fail.
846     if (ShiftAmt == 0) {
847       if (!ShifterOperandIsNonZero.hasValue())
848         ShifterOperandIsNonZero =
849             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
850       if (*ShifterOperandIsNonZero)
851         continue;
852     }
853 
854     KnownZero &= KZF(KnownZero2, ShiftAmt);
855     KnownOne  &= KOF(KnownOne2, ShiftAmt);
856   }
857 
858   // If there are no compatible shift amounts, then we've proven that the shift
859   // amount must be >= the BitWidth, and the result is undefined. We could
860   // return anything we'd like, but we need to make sure the sets of known bits
861   // stay disjoint (it should be better for some other code to actually
862   // propagate the undef than to pick a value here using known bits).
863   if ((KnownZero & KnownOne) != 0) {
864     KnownZero.clearAllBits();
865     KnownOne.clearAllBits();
866   }
867 }
868 
869 static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
870                                          APInt &KnownOne, unsigned Depth,
871                                          const Query &Q) {
872   unsigned BitWidth = KnownZero.getBitWidth();
873 
874   APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
875   switch (I->getOpcode()) {
876   default: break;
877   case Instruction::Load:
878     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
879       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
880     break;
881   case Instruction::And: {
882     // If either the LHS or the RHS are Zero, the result is zero.
883     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
884     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
885 
886     // Output known-1 bits are only known if set in both the LHS & RHS.
887     KnownOne &= KnownOne2;
888     // Output known-0 are known to be clear if zero in either the LHS | RHS.
889     KnownZero |= KnownZero2;
890 
891     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
892     // here we handle the more general case of adding any odd number by
893     // matching the form add(x, add(x, y)) where y is odd.
894     // TODO: This could be generalized to clearing any bit set in y where the
895     // following bit is known to be unset in y.
896     Value *Y = nullptr;
897     if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
898                                       m_Value(Y))) ||
899         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
900                                       m_Value(Y)))) {
901       APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
902       computeKnownBits(Y, KnownZero3, KnownOne3, Depth + 1, Q);
903       if (KnownOne3.countTrailingOnes() > 0)
904         KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
905     }
906     break;
907   }
908   case Instruction::Or: {
909     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
910     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
911 
912     // Output known-0 bits are only known if clear in both the LHS & RHS.
913     KnownZero &= KnownZero2;
914     // Output known-1 are known to be set if set in either the LHS | RHS.
915     KnownOne |= KnownOne2;
916     break;
917   }
918   case Instruction::Xor: {
919     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
920     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
921 
922     // Output known-0 bits are known if clear or set in both the LHS & RHS.
923     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
924     // Output known-1 are known to be set if set in only one of the LHS, RHS.
925     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
926     KnownZero = KnownZeroOut;
927     break;
928   }
929   case Instruction::Mul: {
930     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
931     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
932                         KnownOne, KnownZero2, KnownOne2, Depth, Q);
933     break;
934   }
935   case Instruction::UDiv: {
936     // For the purposes of computing leading zeros we can conservatively
937     // treat a udiv as a logical right shift by the power of 2 known to
938     // be less than the denominator.
939     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
940     unsigned LeadZ = KnownZero2.countLeadingOnes();
941 
942     KnownOne2.clearAllBits();
943     KnownZero2.clearAllBits();
944     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
945     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
946     if (RHSUnknownLeadingOnes != BitWidth)
947       LeadZ = std::min(BitWidth,
948                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
949 
950     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
951     break;
952   }
953   case Instruction::Select: {
954     computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
955     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
956 
957     Value *LHS, *RHS;
958     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
959     if (SelectPatternResult::isMinOrMax(SPF)) {
960       computeKnownBits(RHS, KnownZero, KnownOne, Depth + 1, Q);
961       computeKnownBits(LHS, KnownZero2, KnownOne2, Depth + 1, Q);
962     } else {
963       computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
964       computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
965     }
966 
967     unsigned MaxHighOnes = 0;
968     unsigned MaxHighZeros = 0;
969     if (SPF == SPF_SMAX) {
970       // If both sides are negative, the result is negative.
971       if (KnownOne[BitWidth - 1] && KnownOne2[BitWidth - 1])
972         // We can derive a lower bound on the result by taking the max of the
973         // leading one bits.
974         MaxHighOnes =
975             std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
976       // If either side is non-negative, the result is non-negative.
977       else if (KnownZero[BitWidth - 1] || KnownZero2[BitWidth - 1])
978         MaxHighZeros = 1;
979     } else if (SPF == SPF_SMIN) {
980       // If both sides are non-negative, the result is non-negative.
981       if (KnownZero[BitWidth - 1] && KnownZero2[BitWidth - 1])
982         // We can derive an upper bound on the result by taking the max of the
983         // leading zero bits.
984         MaxHighZeros = std::max(KnownZero.countLeadingOnes(),
985                                 KnownZero2.countLeadingOnes());
986       // If either side is negative, the result is negative.
987       else if (KnownOne[BitWidth - 1] || KnownOne2[BitWidth - 1])
988         MaxHighOnes = 1;
989     } else if (SPF == SPF_UMAX) {
990       // We can derive a lower bound on the result by taking the max of the
991       // leading one bits.
992       MaxHighOnes =
993           std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
994     } else if (SPF == SPF_UMIN) {
995       // We can derive an upper bound on the result by taking the max of the
996       // leading zero bits.
997       MaxHighZeros =
998           std::max(KnownZero.countLeadingOnes(), KnownZero2.countLeadingOnes());
999     }
1000 
1001     // Only known if known in both the LHS and RHS.
1002     KnownOne &= KnownOne2;
1003     KnownZero &= KnownZero2;
1004     if (MaxHighOnes > 0)
1005       KnownOne |= APInt::getHighBitsSet(BitWidth, MaxHighOnes);
1006     if (MaxHighZeros > 0)
1007       KnownZero |= APInt::getHighBitsSet(BitWidth, MaxHighZeros);
1008     break;
1009   }
1010   case Instruction::FPTrunc:
1011   case Instruction::FPExt:
1012   case Instruction::FPToUI:
1013   case Instruction::FPToSI:
1014   case Instruction::SIToFP:
1015   case Instruction::UIToFP:
1016     break; // Can't work with floating point.
1017   case Instruction::PtrToInt:
1018   case Instruction::IntToPtr:
1019   case Instruction::AddrSpaceCast: // Pointers could be different sizes.
1020     // FALL THROUGH and handle them the same as zext/trunc.
1021   case Instruction::ZExt:
1022   case Instruction::Trunc: {
1023     Type *SrcTy = I->getOperand(0)->getType();
1024 
1025     unsigned SrcBitWidth;
1026     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1027     // which fall through here.
1028     SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
1029 
1030     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1031     KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
1032     KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
1033     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1034     KnownZero = KnownZero.zextOrTrunc(BitWidth);
1035     KnownOne = KnownOne.zextOrTrunc(BitWidth);
1036     // Any top bits are known to be zero.
1037     if (BitWidth > SrcBitWidth)
1038       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1039     break;
1040   }
1041   case Instruction::BitCast: {
1042     Type *SrcTy = I->getOperand(0)->getType();
1043     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1044         // TODO: For now, not handling conversions like:
1045         // (bitcast i64 %x to <2 x i32>)
1046         !I->getType()->isVectorTy()) {
1047       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1048       break;
1049     }
1050     break;
1051   }
1052   case Instruction::SExt: {
1053     // Compute the bits in the result that are not present in the input.
1054     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1055 
1056     KnownZero = KnownZero.trunc(SrcBitWidth);
1057     KnownOne = KnownOne.trunc(SrcBitWidth);
1058     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1059     KnownZero = KnownZero.zext(BitWidth);
1060     KnownOne = KnownOne.zext(BitWidth);
1061 
1062     // If the sign bit of the input is known set or clear, then we know the
1063     // top bits of the result.
1064     if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
1065       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1066     else if (KnownOne[SrcBitWidth-1])           // Input sign bit known set
1067       KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1068     break;
1069   }
1070   case Instruction::Shl: {
1071     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1072     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1073       return (KnownZero << ShiftAmt) |
1074              APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
1075     };
1076 
1077     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1078       return KnownOne << ShiftAmt;
1079     };
1080 
1081     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1082                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1083                                       KOF);
1084     break;
1085   }
1086   case Instruction::LShr: {
1087     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1088     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1089       return APIntOps::lshr(KnownZero, ShiftAmt) |
1090              // High bits known zero.
1091              APInt::getHighBitsSet(BitWidth, ShiftAmt);
1092     };
1093 
1094     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1095       return APIntOps::lshr(KnownOne, ShiftAmt);
1096     };
1097 
1098     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1099                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1100                                       KOF);
1101     break;
1102   }
1103   case Instruction::AShr: {
1104     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1105     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1106       return APIntOps::ashr(KnownZero, ShiftAmt);
1107     };
1108 
1109     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1110       return APIntOps::ashr(KnownOne, ShiftAmt);
1111     };
1112 
1113     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1114                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1115                                       KOF);
1116     break;
1117   }
1118   case Instruction::Sub: {
1119     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1120     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1121                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1122                            Q);
1123     break;
1124   }
1125   case Instruction::Add: {
1126     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1127     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1128                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1129                            Q);
1130     break;
1131   }
1132   case Instruction::SRem:
1133     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1134       APInt RA = Rem->getValue().abs();
1135       if (RA.isPowerOf2()) {
1136         APInt LowBits = RA - 1;
1137         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
1138                          Q);
1139 
1140         // The low bits of the first operand are unchanged by the srem.
1141         KnownZero = KnownZero2 & LowBits;
1142         KnownOne = KnownOne2 & LowBits;
1143 
1144         // If the first operand is non-negative or has all low bits zero, then
1145         // the upper bits are all zero.
1146         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
1147           KnownZero |= ~LowBits;
1148 
1149         // If the first operand is negative and not all low bits are zero, then
1150         // the upper bits are all one.
1151         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
1152           KnownOne |= ~LowBits;
1153 
1154         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1155       }
1156     }
1157 
1158     // The sign bit is the LHS's sign bit, except when the result of the
1159     // remainder is zero.
1160     if (KnownZero.isNonNegative()) {
1161       APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
1162       computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
1163                        Q);
1164       // If it's known zero, our sign bit is also zero.
1165       if (LHSKnownZero.isNegative())
1166         KnownZero.setBit(BitWidth - 1);
1167     }
1168 
1169     break;
1170   case Instruction::URem: {
1171     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1172       const APInt &RA = Rem->getValue();
1173       if (RA.isPowerOf2()) {
1174         APInt LowBits = (RA - 1);
1175         computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1176         KnownZero |= ~LowBits;
1177         KnownOne &= LowBits;
1178         break;
1179       }
1180     }
1181 
1182     // Since the result is less than or equal to either operand, any leading
1183     // zero bits in either operand must also exist in the result.
1184     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1185     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
1186 
1187     unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
1188                                 KnownZero2.countLeadingOnes());
1189     KnownOne.clearAllBits();
1190     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
1191     break;
1192   }
1193 
1194   case Instruction::Alloca: {
1195     AllocaInst *AI = cast<AllocaInst>(I);
1196     unsigned Align = AI->getAlignment();
1197     if (Align == 0)
1198       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1199 
1200     if (Align > 0)
1201       KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1202     break;
1203   }
1204   case Instruction::GetElementPtr: {
1205     // Analyze all of the subscripts of this getelementptr instruction
1206     // to determine if we can prove known low zero bits.
1207     APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
1208     computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
1209                      Q);
1210     unsigned TrailZ = LocalKnownZero.countTrailingOnes();
1211 
1212     gep_type_iterator GTI = gep_type_begin(I);
1213     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1214       Value *Index = I->getOperand(i);
1215       if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1216         // Handle struct member offset arithmetic.
1217 
1218         // Handle case when index is vector zeroinitializer
1219         Constant *CIndex = cast<Constant>(Index);
1220         if (CIndex->isZeroValue())
1221           continue;
1222 
1223         if (CIndex->getType()->isVectorTy())
1224           Index = CIndex->getSplatValue();
1225 
1226         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1227         const StructLayout *SL = Q.DL.getStructLayout(STy);
1228         uint64_t Offset = SL->getElementOffset(Idx);
1229         TrailZ = std::min<unsigned>(TrailZ,
1230                                     countTrailingZeros(Offset));
1231       } else {
1232         // Handle array index arithmetic.
1233         Type *IndexedTy = GTI.getIndexedType();
1234         if (!IndexedTy->isSized()) {
1235           TrailZ = 0;
1236           break;
1237         }
1238         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1239         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1240         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
1241         computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
1242         TrailZ = std::min(TrailZ,
1243                           unsigned(countTrailingZeros(TypeSize) +
1244                                    LocalKnownZero.countTrailingOnes()));
1245       }
1246     }
1247 
1248     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
1249     break;
1250   }
1251   case Instruction::PHI: {
1252     PHINode *P = cast<PHINode>(I);
1253     // Handle the case of a simple two-predecessor recurrence PHI.
1254     // There's a lot more that could theoretically be done here, but
1255     // this is sufficient to catch some interesting cases.
1256     if (P->getNumIncomingValues() == 2) {
1257       for (unsigned i = 0; i != 2; ++i) {
1258         Value *L = P->getIncomingValue(i);
1259         Value *R = P->getIncomingValue(!i);
1260         Operator *LU = dyn_cast<Operator>(L);
1261         if (!LU)
1262           continue;
1263         unsigned Opcode = LU->getOpcode();
1264         // Check for operations that have the property that if
1265         // both their operands have low zero bits, the result
1266         // will have low zero bits.
1267         if (Opcode == Instruction::Add ||
1268             Opcode == Instruction::Sub ||
1269             Opcode == Instruction::And ||
1270             Opcode == Instruction::Or ||
1271             Opcode == Instruction::Mul) {
1272           Value *LL = LU->getOperand(0);
1273           Value *LR = LU->getOperand(1);
1274           // Find a recurrence.
1275           if (LL == I)
1276             L = LR;
1277           else if (LR == I)
1278             L = LL;
1279           else
1280             break;
1281           // Ok, we have a PHI of the form L op= R. Check for low
1282           // zero bits.
1283           computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);
1284 
1285           // We need to take the minimum number of known bits
1286           APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
1287           computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);
1288 
1289           KnownZero = APInt::getLowBitsSet(BitWidth,
1290                                            std::min(KnownZero2.countTrailingOnes(),
1291                                                     KnownZero3.countTrailingOnes()));
1292           break;
1293         }
1294       }
1295     }
1296 
1297     // Unreachable blocks may have zero-operand PHI nodes.
1298     if (P->getNumIncomingValues() == 0)
1299       break;
1300 
1301     // Otherwise take the unions of the known bit sets of the operands,
1302     // taking conservative care to avoid excessive recursion.
1303     if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
1304       // Skip if every incoming value references to ourself.
1305       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1306         break;
1307 
1308       KnownZero = APInt::getAllOnesValue(BitWidth);
1309       KnownOne = APInt::getAllOnesValue(BitWidth);
1310       for (Value *IncValue : P->incoming_values()) {
1311         // Skip direct self references.
1312         if (IncValue == P) continue;
1313 
1314         KnownZero2 = APInt(BitWidth, 0);
1315         KnownOne2 = APInt(BitWidth, 0);
1316         // Recurse, but cap the recursion to one level, because we don't
1317         // want to waste time spinning around in loops.
1318         computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
1319         KnownZero &= KnownZero2;
1320         KnownOne &= KnownOne2;
1321         // If all bits have been ruled out, there's no need to check
1322         // more operands.
1323         if (!KnownZero && !KnownOne)
1324           break;
1325       }
1326     }
1327     break;
1328   }
1329   case Instruction::Call:
1330   case Instruction::Invoke:
1331     // If range metadata is attached to this call, set known bits from that,
1332     // and then intersect with known bits based on other properties of the
1333     // function.
1334     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1335       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
1336     if (Value *RV = CallSite(I).getReturnedArgOperand()) {
1337       computeKnownBits(RV, KnownZero2, KnownOne2, Depth + 1, Q);
1338       KnownZero |= KnownZero2;
1339       KnownOne |= KnownOne2;
1340     }
1341     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1342       switch (II->getIntrinsicID()) {
1343       default: break;
1344       case Intrinsic::bswap:
1345         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1346         KnownZero |= KnownZero2.byteSwap();
1347         KnownOne |= KnownOne2.byteSwap();
1348         break;
1349       case Intrinsic::ctlz:
1350       case Intrinsic::cttz: {
1351         unsigned LowBits = Log2_32(BitWidth)+1;
1352         // If this call is undefined for 0, the result will be less than 2^n.
1353         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1354           LowBits -= 1;
1355         KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1356         break;
1357       }
1358       case Intrinsic::ctpop: {
1359         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1360         // We can bound the space the count needs.  Also, bits known to be zero
1361         // can't contribute to the population.
1362         unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
1363         unsigned LeadingZeros =
1364           APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
1365         assert(LeadingZeros <= BitWidth);
1366         KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
1367         KnownOne &= ~KnownZero;
1368         // TODO: we could bound KnownOne using the lower bound on the number
1369         // of bits which might be set provided by popcnt KnownOne2.
1370         break;
1371       }
1372       case Intrinsic::x86_sse42_crc32_64_64:
1373         KnownZero |= APInt::getHighBitsSet(64, 32);
1374         break;
1375       }
1376     }
1377     break;
1378   case Instruction::ExtractValue:
1379     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1380       ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1381       if (EVI->getNumIndices() != 1) break;
1382       if (EVI->getIndices()[0] == 0) {
1383         switch (II->getIntrinsicID()) {
1384         default: break;
1385         case Intrinsic::uadd_with_overflow:
1386         case Intrinsic::sadd_with_overflow:
1387           computeKnownBitsAddSub(true, II->getArgOperand(0),
1388                                  II->getArgOperand(1), false, KnownZero,
1389                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1390           break;
1391         case Intrinsic::usub_with_overflow:
1392         case Intrinsic::ssub_with_overflow:
1393           computeKnownBitsAddSub(false, II->getArgOperand(0),
1394                                  II->getArgOperand(1), false, KnownZero,
1395                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1396           break;
1397         case Intrinsic::umul_with_overflow:
1398         case Intrinsic::smul_with_overflow:
1399           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1400                               KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1401                               Q);
1402           break;
1403         }
1404       }
1405     }
1406   }
1407 }
1408 
1409 /// Determine which bits of V are known to be either zero or one and return
1410 /// them in the KnownZero/KnownOne bit sets.
1411 ///
1412 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1413 /// we cannot optimize based on the assumption that it is zero without changing
1414 /// it to be an explicit zero.  If we don't change it to zero, other code could
1415 /// optimized based on the contradictory assumption that it is non-zero.
1416 /// Because instcombine aggressively folds operations with undef args anyway,
1417 /// this won't lose us code quality.
1418 ///
1419 /// This function is defined on values with integer type, values with pointer
1420 /// type, and vectors of integers.  In the case
1421 /// where V is a vector, known zero, and known one values are the
1422 /// same width as the vector element, and the bit is set only if it is true
1423 /// for all of the elements in the vector.
1424 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
1425                       unsigned Depth, const Query &Q) {
1426   assert(V && "No Value?");
1427   assert(Depth <= MaxDepth && "Limit Search Depth");
1428   unsigned BitWidth = KnownZero.getBitWidth();
1429 
1430   assert((V->getType()->isIntOrIntVectorTy() ||
1431           V->getType()->getScalarType()->isPointerTy()) &&
1432          "Not integer or pointer type!");
1433   assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1434          (!V->getType()->isIntOrIntVectorTy() ||
1435           V->getType()->getScalarSizeInBits() == BitWidth) &&
1436          KnownZero.getBitWidth() == BitWidth &&
1437          KnownOne.getBitWidth() == BitWidth &&
1438          "V, KnownOne and KnownZero should have same BitWidth");
1439 
1440   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1441     // We know all of the bits for a constant!
1442     KnownOne = CI->getValue();
1443     KnownZero = ~KnownOne;
1444     return;
1445   }
1446   // Null and aggregate-zero are all-zeros.
1447   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1448     KnownOne.clearAllBits();
1449     KnownZero = APInt::getAllOnesValue(BitWidth);
1450     return;
1451   }
1452   // Handle a constant vector by taking the intersection of the known bits of
1453   // each element.
1454   if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1455     // We know that CDS must be a vector of integers. Take the intersection of
1456     // each element.
1457     KnownZero.setAllBits(); KnownOne.setAllBits();
1458     APInt Elt(KnownZero.getBitWidth(), 0);
1459     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1460       Elt = CDS->getElementAsInteger(i);
1461       KnownZero &= ~Elt;
1462       KnownOne &= Elt;
1463     }
1464     return;
1465   }
1466 
1467   if (auto *CV = dyn_cast<ConstantVector>(V)) {
1468     // We know that CV must be a vector of integers. Take the intersection of
1469     // each element.
1470     KnownZero.setAllBits(); KnownOne.setAllBits();
1471     APInt Elt(KnownZero.getBitWidth(), 0);
1472     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1473       Constant *Element = CV->getAggregateElement(i);
1474       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1475       if (!ElementCI) {
1476         KnownZero.clearAllBits();
1477         KnownOne.clearAllBits();
1478         return;
1479       }
1480       Elt = ElementCI->getValue();
1481       KnownZero &= ~Elt;
1482       KnownOne &= Elt;
1483     }
1484     return;
1485   }
1486 
1487   // Start out not knowing anything.
1488   KnownZero.clearAllBits(); KnownOne.clearAllBits();
1489 
1490   // Limit search depth.
1491   // All recursive calls that increase depth must come after this.
1492   if (Depth == MaxDepth)
1493     return;
1494 
1495   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1496   // the bits of its aliasee.
1497   if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1498     if (!GA->isInterposable())
1499       computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
1500     return;
1501   }
1502 
1503   if (Operator *I = dyn_cast<Operator>(V))
1504     computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
1505 
1506   // Aligned pointers have trailing zeros - refine KnownZero set
1507   if (V->getType()->isPointerTy()) {
1508     unsigned Align = V->getPointerAlignment(Q.DL);
1509     if (Align)
1510       KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1511   }
1512 
1513   // computeKnownBitsFromAssume strictly refines KnownZero and
1514   // KnownOne. Therefore, we run them after computeKnownBitsFromOperator.
1515 
1516   // Check whether a nearby assume intrinsic can determine some known bits.
1517   computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);
1518 
1519   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1520 }
1521 
1522 /// Determine whether the sign bit is known to be zero or one.
1523 /// Convenience wrapper around computeKnownBits.
1524 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
1525                     unsigned Depth, const Query &Q) {
1526   unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
1527   if (!BitWidth) {
1528     KnownZero = false;
1529     KnownOne = false;
1530     return;
1531   }
1532   APInt ZeroBits(BitWidth, 0);
1533   APInt OneBits(BitWidth, 0);
1534   computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
1535   KnownOne = OneBits[BitWidth - 1];
1536   KnownZero = ZeroBits[BitWidth - 1];
1537 }
1538 
1539 /// Return true if the given value is known to have exactly one
1540 /// bit set when defined. For vectors return true if every element is known to
1541 /// be a power of two when defined. Supports values with integer or pointer
1542 /// types and vectors of integers.
1543 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
1544                             const Query &Q) {
1545   if (Constant *C = dyn_cast<Constant>(V)) {
1546     if (C->isNullValue())
1547       return OrZero;
1548 
1549     const APInt *ConstIntOrConstSplatInt;
1550     if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1551       return ConstIntOrConstSplatInt->isPowerOf2();
1552   }
1553 
1554   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1555   // it is shifted off the end then the result is undefined.
1556   if (match(V, m_Shl(m_One(), m_Value())))
1557     return true;
1558 
1559   // (signbit) >>l X is clearly a power of two if the one is not shifted off the
1560   // bottom.  If it is shifted off the bottom then the result is undefined.
1561   if (match(V, m_LShr(m_SignBit(), m_Value())))
1562     return true;
1563 
1564   // The remaining tests are all recursive, so bail out if we hit the limit.
1565   if (Depth++ == MaxDepth)
1566     return false;
1567 
1568   Value *X = nullptr, *Y = nullptr;
1569   // A shift left or a logical shift right of a power of two is a power of two
1570   // or zero.
1571   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1572                  match(V, m_LShr(m_Value(X), m_Value()))))
1573     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1574 
1575   if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1576     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1577 
1578   if (SelectInst *SI = dyn_cast<SelectInst>(V))
1579     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1580            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1581 
1582   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1583     // A power of two and'd with anything is a power of two or zero.
1584     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1585         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1586       return true;
1587     // X & (-X) is always a power of two or zero.
1588     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1589       return true;
1590     return false;
1591   }
1592 
1593   // Adding a power-of-two or zero to the same power-of-two or zero yields
1594   // either the original power-of-two, a larger power-of-two or zero.
1595   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1596     OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1597     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1598       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1599           match(X, m_And(m_Value(), m_Specific(Y))))
1600         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1601           return true;
1602       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1603           match(Y, m_And(m_Value(), m_Specific(X))))
1604         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1605           return true;
1606 
1607       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1608       APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1609       computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);
1610 
1611       APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1612       computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
1613       // If i8 V is a power of two or zero:
1614       //  ZeroBits: 1 1 1 0 1 1 1 1
1615       // ~ZeroBits: 0 0 0 1 0 0 0 0
1616       if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1617         // If OrZero isn't set, we cannot give back a zero result.
1618         // Make sure either the LHS or RHS has a bit set.
1619         if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1620           return true;
1621     }
1622   }
1623 
1624   // An exact divide or right shift can only shift off zero bits, so the result
1625   // is a power of two only if the first operand is a power of two and not
1626   // copying a sign bit (sdiv int_min, 2).
1627   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1628       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1629     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1630                                   Depth, Q);
1631   }
1632 
1633   return false;
1634 }
1635 
1636 /// \brief Test whether a GEP's result is known to be non-null.
1637 ///
1638 /// Uses properties inherent in a GEP to try to determine whether it is known
1639 /// to be non-null.
1640 ///
1641 /// Currently this routine does not support vector GEPs.
1642 static bool isGEPKnownNonNull(GEPOperator *GEP, unsigned Depth,
1643                               const Query &Q) {
1644   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1645     return false;
1646 
1647   // FIXME: Support vector-GEPs.
1648   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1649 
1650   // If the base pointer is non-null, we cannot walk to a null address with an
1651   // inbounds GEP in address space zero.
1652   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1653     return true;
1654 
1655   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1656   // If so, then the GEP cannot produce a null pointer, as doing so would
1657   // inherently violate the inbounds contract within address space zero.
1658   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1659        GTI != GTE; ++GTI) {
1660     // Struct types are easy -- they must always be indexed by a constant.
1661     if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1662       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1663       unsigned ElementIdx = OpC->getZExtValue();
1664       const StructLayout *SL = Q.DL.getStructLayout(STy);
1665       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1666       if (ElementOffset > 0)
1667         return true;
1668       continue;
1669     }
1670 
1671     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1672     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1673       continue;
1674 
1675     // Fast path the constant operand case both for efficiency and so we don't
1676     // increment Depth when just zipping down an all-constant GEP.
1677     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1678       if (!OpC->isZero())
1679         return true;
1680       continue;
1681     }
1682 
1683     // We post-increment Depth here because while isKnownNonZero increments it
1684     // as well, when we pop back up that increment won't persist. We don't want
1685     // to recurse 10k times just because we have 10k GEP operands. We don't
1686     // bail completely out because we want to handle constant GEPs regardless
1687     // of depth.
1688     if (Depth++ >= MaxDepth)
1689       continue;
1690 
1691     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1692       return true;
1693   }
1694 
1695   return false;
1696 }
1697 
1698 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1699 /// ensure that the value it's attached to is never Value?  'RangeType' is
1700 /// is the type of the value described by the range.
1701 static bool rangeMetadataExcludesValue(MDNode* Ranges, const APInt& Value) {
1702   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1703   assert(NumRanges >= 1);
1704   for (unsigned i = 0; i < NumRanges; ++i) {
1705     ConstantInt *Lower =
1706         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1707     ConstantInt *Upper =
1708         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1709     ConstantRange Range(Lower->getValue(), Upper->getValue());
1710     if (Range.contains(Value))
1711       return false;
1712   }
1713   return true;
1714 }
1715 
1716 /// Return true if the given value is known to be non-zero when defined.
1717 /// For vectors return true if every element is known to be non-zero when
1718 /// defined. Supports values with integer or pointer type and vectors of
1719 /// integers.
1720 bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q) {
1721   if (auto *C = dyn_cast<Constant>(V)) {
1722     if (C->isNullValue())
1723       return false;
1724     if (isa<ConstantInt>(C))
1725       // Must be non-zero due to null test above.
1726       return true;
1727 
1728     // For constant vectors, check that all elements are undefined or known
1729     // non-zero to determine that the whole vector is known non-zero.
1730     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1731       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1732         Constant *Elt = C->getAggregateElement(i);
1733         if (!Elt || Elt->isNullValue())
1734           return false;
1735         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1736           return false;
1737       }
1738       return true;
1739     }
1740 
1741     return false;
1742   }
1743 
1744   if (auto *I = dyn_cast<Instruction>(V)) {
1745     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1746       // If the possible ranges don't contain zero, then the value is
1747       // definitely non-zero.
1748       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1749         const APInt ZeroValue(Ty->getBitWidth(), 0);
1750         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1751           return true;
1752       }
1753     }
1754   }
1755 
1756   // The remaining tests are all recursive, so bail out if we hit the limit.
1757   if (Depth++ >= MaxDepth)
1758     return false;
1759 
1760   // Check for pointer simplifications.
1761   if (V->getType()->isPointerTy()) {
1762     if (isKnownNonNull(V))
1763       return true;
1764     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1765       if (isGEPKnownNonNull(GEP, Depth, Q))
1766         return true;
1767   }
1768 
1769   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1770 
1771   // X | Y != 0 if X != 0 or Y != 0.
1772   Value *X = nullptr, *Y = nullptr;
1773   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1774     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1775 
1776   // ext X != 0 if X != 0.
1777   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1778     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1779 
1780   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1781   // if the lowest bit is shifted off the end.
1782   if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1783     // shl nuw can't remove any non-zero bits.
1784     OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1785     if (BO->hasNoUnsignedWrap())
1786       return isKnownNonZero(X, Depth, Q);
1787 
1788     APInt KnownZero(BitWidth, 0);
1789     APInt KnownOne(BitWidth, 0);
1790     computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1791     if (KnownOne[0])
1792       return true;
1793   }
1794   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1795   // defined if the sign bit is shifted off the end.
1796   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1797     // shr exact can only shift out zero bits.
1798     PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1799     if (BO->isExact())
1800       return isKnownNonZero(X, Depth, Q);
1801 
1802     bool XKnownNonNegative, XKnownNegative;
1803     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1804     if (XKnownNegative)
1805       return true;
1806 
1807     // If the shifter operand is a constant, and all of the bits shifted
1808     // out are known to be zero, and X is known non-zero then at least one
1809     // non-zero bit must remain.
1810     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1811       APInt KnownZero(BitWidth, 0);
1812       APInt KnownOne(BitWidth, 0);
1813       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1814 
1815       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1816       // Is there a known one in the portion not shifted out?
1817       if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1818         return true;
1819       // Are all the bits to be shifted out known zero?
1820       if (KnownZero.countTrailingOnes() >= ShiftVal)
1821         return isKnownNonZero(X, Depth, Q);
1822     }
1823   }
1824   // div exact can only produce a zero if the dividend is zero.
1825   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1826     return isKnownNonZero(X, Depth, Q);
1827   }
1828   // X + Y.
1829   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1830     bool XKnownNonNegative, XKnownNegative;
1831     bool YKnownNonNegative, YKnownNegative;
1832     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1833     ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);
1834 
1835     // If X and Y are both non-negative (as signed values) then their sum is not
1836     // zero unless both X and Y are zero.
1837     if (XKnownNonNegative && YKnownNonNegative)
1838       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1839         return true;
1840 
1841     // If X and Y are both negative (as signed values) then their sum is not
1842     // zero unless both X and Y equal INT_MIN.
1843     if (BitWidth && XKnownNegative && YKnownNegative) {
1844       APInt KnownZero(BitWidth, 0);
1845       APInt KnownOne(BitWidth, 0);
1846       APInt Mask = APInt::getSignedMaxValue(BitWidth);
1847       // The sign bit of X is set.  If some other bit is set then X is not equal
1848       // to INT_MIN.
1849       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1850       if ((KnownOne & Mask) != 0)
1851         return true;
1852       // The sign bit of Y is set.  If some other bit is set then Y is not equal
1853       // to INT_MIN.
1854       computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
1855       if ((KnownOne & Mask) != 0)
1856         return true;
1857     }
1858 
1859     // The sum of a non-negative number and a power of two is not zero.
1860     if (XKnownNonNegative &&
1861         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1862       return true;
1863     if (YKnownNonNegative &&
1864         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1865       return true;
1866   }
1867   // X * Y.
1868   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1869     OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1870     // If X and Y are non-zero then so is X * Y as long as the multiplication
1871     // does not overflow.
1872     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1873         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1874       return true;
1875   }
1876   // (C ? X : Y) != 0 if X != 0 and Y != 0.
1877   else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1878     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1879         isKnownNonZero(SI->getFalseValue(), Depth, Q))
1880       return true;
1881   }
1882   // PHI
1883   else if (PHINode *PN = dyn_cast<PHINode>(V)) {
1884     // Try and detect a recurrence that monotonically increases from a
1885     // starting value, as these are common as induction variables.
1886     if (PN->getNumIncomingValues() == 2) {
1887       Value *Start = PN->getIncomingValue(0);
1888       Value *Induction = PN->getIncomingValue(1);
1889       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1890         std::swap(Start, Induction);
1891       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1892         if (!C->isZero() && !C->isNegative()) {
1893           ConstantInt *X;
1894           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1895                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1896               !X->isNegative())
1897             return true;
1898         }
1899       }
1900     }
1901     // Check if all incoming values are non-zero constant.
1902     bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1903       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1904     });
1905     if (AllNonZeroConstants)
1906       return true;
1907   }
1908 
1909   if (!BitWidth) return false;
1910   APInt KnownZero(BitWidth, 0);
1911   APInt KnownOne(BitWidth, 0);
1912   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1913   return KnownOne != 0;
1914 }
1915 
1916 /// Return true if V2 == V1 + X, where X is known non-zero.
1917 static bool isAddOfNonZero(Value *V1, Value *V2, const Query &Q) {
1918   BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
1919   if (!BO || BO->getOpcode() != Instruction::Add)
1920     return false;
1921   Value *Op = nullptr;
1922   if (V2 == BO->getOperand(0))
1923     Op = BO->getOperand(1);
1924   else if (V2 == BO->getOperand(1))
1925     Op = BO->getOperand(0);
1926   else
1927     return false;
1928   return isKnownNonZero(Op, 0, Q);
1929 }
1930 
1931 /// Return true if it is known that V1 != V2.
1932 static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q) {
1933   if (V1->getType()->isVectorTy() || V1 == V2)
1934     return false;
1935   if (V1->getType() != V2->getType())
1936     // We can't look through casts yet.
1937     return false;
1938   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
1939     return true;
1940 
1941   if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
1942     // Are any known bits in V1 contradictory to known bits in V2? If V1
1943     // has a known zero where V2 has a known one, they must not be equal.
1944     auto BitWidth = Ty->getBitWidth();
1945     APInt KnownZero1(BitWidth, 0);
1946     APInt KnownOne1(BitWidth, 0);
1947     computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
1948     APInt KnownZero2(BitWidth, 0);
1949     APInt KnownOne2(BitWidth, 0);
1950     computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);
1951 
1952     auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
1953     if (OppositeBits.getBoolValue())
1954       return true;
1955   }
1956   return false;
1957 }
1958 
1959 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
1960 /// simplify operations downstream. Mask is known to be zero for bits that V
1961 /// cannot have.
1962 ///
1963 /// This function is defined on values with integer type, values with pointer
1964 /// type, and vectors of integers.  In the case
1965 /// where V is a vector, the mask, known zero, and known one values are the
1966 /// same width as the vector element, and the bit is set only if it is true
1967 /// for all of the elements in the vector.
1968 bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
1969                        const Query &Q) {
1970   APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
1971   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1972   return (KnownZero & Mask) == Mask;
1973 }
1974 
1975 /// For vector constants, loop over the elements and find the constant with the
1976 /// minimum number of sign bits. Return 0 if the value is not a vector constant
1977 /// or if any element was not analyzed; otherwise, return the count for the
1978 /// element with the minimum number of sign bits.
1979 static unsigned computeNumSignBitsVectorConstant(Value *V, unsigned TyBits) {
1980   auto *CV = dyn_cast<Constant>(V);
1981   if (!CV || !CV->getType()->isVectorTy())
1982     return 0;
1983 
1984   unsigned MinSignBits = TyBits;
1985   unsigned NumElts = CV->getType()->getVectorNumElements();
1986   for (unsigned i = 0; i != NumElts; ++i) {
1987     // If we find a non-ConstantInt, bail out.
1988     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
1989     if (!Elt)
1990       return 0;
1991 
1992     // If the sign bit is 1, flip the bits, so we always count leading zeros.
1993     APInt EltVal = Elt->getValue();
1994     if (EltVal.isNegative())
1995       EltVal = ~EltVal;
1996     MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
1997   }
1998 
1999   return MinSignBits;
2000 }
2001 
2002 /// Return the number of times the sign bit of the register is replicated into
2003 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2004 /// (itself), but other cases can give us information. For example, immediately
2005 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2006 /// other, so we return 3. For vectors, return the number of sign bits for the
2007 /// vector element with the mininum number of known sign bits.
2008 unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q) {
2009   unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2010   unsigned Tmp, Tmp2;
2011   unsigned FirstAnswer = 1;
2012 
2013   // Note that ConstantInt is handled by the general computeKnownBits case
2014   // below.
2015 
2016   if (Depth == 6)
2017     return 1;  // Limit search depth.
2018 
2019   Operator *U = dyn_cast<Operator>(V);
2020   switch (Operator::getOpcode(V)) {
2021   default: break;
2022   case Instruction::SExt:
2023     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2024     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2025 
2026   case Instruction::SDiv: {
2027     const APInt *Denominator;
2028     // sdiv X, C -> adds log(C) sign bits.
2029     if (match(U->getOperand(1), m_APInt(Denominator))) {
2030 
2031       // Ignore non-positive denominator.
2032       if (!Denominator->isStrictlyPositive())
2033         break;
2034 
2035       // Calculate the incoming numerator bits.
2036       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2037 
2038       // Add floor(log(C)) bits to the numerator bits.
2039       return std::min(TyBits, NumBits + Denominator->logBase2());
2040     }
2041     break;
2042   }
2043 
2044   case Instruction::SRem: {
2045     const APInt *Denominator;
2046     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2047     // positive constant.  This let us put a lower bound on the number of sign
2048     // bits.
2049     if (match(U->getOperand(1), m_APInt(Denominator))) {
2050 
2051       // Ignore non-positive denominator.
2052       if (!Denominator->isStrictlyPositive())
2053         break;
2054 
2055       // Calculate the incoming numerator bits. SRem by a positive constant
2056       // can't lower the number of sign bits.
2057       unsigned NumrBits =
2058           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2059 
2060       // Calculate the leading sign bit constraints by examining the
2061       // denominator.  Given that the denominator is positive, there are two
2062       // cases:
2063       //
2064       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2065       //     (1 << ceilLogBase2(C)).
2066       //
2067       //  2. the numerator is negative.  Then the result range is (-C,0] and
2068       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2069       //
2070       // Thus a lower bound on the number of sign bits is `TyBits -
2071       // ceilLogBase2(C)`.
2072 
2073       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2074       return std::max(NumrBits, ResBits);
2075     }
2076     break;
2077   }
2078 
2079   case Instruction::AShr: {
2080     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2081     // ashr X, C   -> adds C sign bits.  Vectors too.
2082     const APInt *ShAmt;
2083     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2084       Tmp += ShAmt->getZExtValue();
2085       if (Tmp > TyBits) Tmp = TyBits;
2086     }
2087     return Tmp;
2088   }
2089   case Instruction::Shl: {
2090     const APInt *ShAmt;
2091     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2092       // shl destroys sign bits.
2093       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2094       Tmp2 = ShAmt->getZExtValue();
2095       if (Tmp2 >= TyBits ||      // Bad shift.
2096           Tmp2 >= Tmp) break;    // Shifted all sign bits out.
2097       return Tmp - Tmp2;
2098     }
2099     break;
2100   }
2101   case Instruction::And:
2102   case Instruction::Or:
2103   case Instruction::Xor:    // NOT is handled here.
2104     // Logical binary ops preserve the number of sign bits at the worst.
2105     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2106     if (Tmp != 1) {
2107       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2108       FirstAnswer = std::min(Tmp, Tmp2);
2109       // We computed what we know about the sign bits as our first
2110       // answer. Now proceed to the generic code that uses
2111       // computeKnownBits, and pick whichever answer is better.
2112     }
2113     break;
2114 
2115   case Instruction::Select:
2116     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2117     if (Tmp == 1) return 1;  // Early out.
2118     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2119     return std::min(Tmp, Tmp2);
2120 
2121   case Instruction::Add:
2122     // Add can have at most one carry bit.  Thus we know that the output
2123     // is, at worst, one more bit than the inputs.
2124     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2125     if (Tmp == 1) return 1;  // Early out.
2126 
2127     // Special case decrementing a value (ADD X, -1):
2128     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2129       if (CRHS->isAllOnesValue()) {
2130         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2131         computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
2132 
2133         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2134         // sign bits set.
2135         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2136           return TyBits;
2137 
2138         // If we are subtracting one from a positive number, there is no carry
2139         // out of the result.
2140         if (KnownZero.isNegative())
2141           return Tmp;
2142       }
2143 
2144     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2145     if (Tmp2 == 1) return 1;
2146     return std::min(Tmp, Tmp2)-1;
2147 
2148   case Instruction::Sub:
2149     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2150     if (Tmp2 == 1) return 1;
2151 
2152     // Handle NEG.
2153     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2154       if (CLHS->isNullValue()) {
2155         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2156         computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
2157         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2158         // sign bits set.
2159         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2160           return TyBits;
2161 
2162         // If the input is known to be positive (the sign bit is known clear),
2163         // the output of the NEG has the same number of sign bits as the input.
2164         if (KnownZero.isNegative())
2165           return Tmp2;
2166 
2167         // Otherwise, we treat this like a SUB.
2168       }
2169 
2170     // Sub can have at most one carry bit.  Thus we know that the output
2171     // is, at worst, one more bit than the inputs.
2172     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2173     if (Tmp == 1) return 1;  // Early out.
2174     return std::min(Tmp, Tmp2)-1;
2175 
2176   case Instruction::PHI: {
2177     PHINode *PN = cast<PHINode>(U);
2178     unsigned NumIncomingValues = PN->getNumIncomingValues();
2179     // Don't analyze large in-degree PHIs.
2180     if (NumIncomingValues > 4) break;
2181     // Unreachable blocks may have zero-operand PHI nodes.
2182     if (NumIncomingValues == 0) break;
2183 
2184     // Take the minimum of all incoming values.  This can't infinitely loop
2185     // because of our depth threshold.
2186     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2187     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2188       if (Tmp == 1) return Tmp;
2189       Tmp = std::min(
2190           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2191     }
2192     return Tmp;
2193   }
2194 
2195   case Instruction::Trunc:
2196     // FIXME: it's tricky to do anything useful for this, but it is an important
2197     // case for targets like X86.
2198     break;
2199   }
2200 
2201   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2202   // use this information.
2203 
2204   // If we can examine all elements of a vector constant successfully, we're
2205   // done (we can't do any better than that). If not, keep trying.
2206   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2207     return VecSignBits;
2208 
2209   APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2210   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2211 
2212   // If we know that the sign bit is either zero or one, determine the number of
2213   // identical bits in the top of the input value.
2214   if (KnownZero.isNegative())
2215     return std::max(FirstAnswer, KnownZero.countLeadingOnes());
2216 
2217   if (KnownOne.isNegative())
2218     return std::max(FirstAnswer, KnownOne.countLeadingOnes());
2219 
2220   // computeKnownBits gave us no extra information about the top bits.
2221   return FirstAnswer;
2222 }
2223 
2224 /// This function computes the integer multiple of Base that equals V.
2225 /// If successful, it returns true and returns the multiple in
2226 /// Multiple. If unsuccessful, it returns false. It looks
2227 /// through SExt instructions only if LookThroughSExt is true.
2228 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2229                            bool LookThroughSExt, unsigned Depth) {
2230   const unsigned MaxDepth = 6;
2231 
2232   assert(V && "No Value?");
2233   assert(Depth <= MaxDepth && "Limit Search Depth");
2234   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2235 
2236   Type *T = V->getType();
2237 
2238   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2239 
2240   if (Base == 0)
2241     return false;
2242 
2243   if (Base == 1) {
2244     Multiple = V;
2245     return true;
2246   }
2247 
2248   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2249   Constant *BaseVal = ConstantInt::get(T, Base);
2250   if (CO && CO == BaseVal) {
2251     // Multiple is 1.
2252     Multiple = ConstantInt::get(T, 1);
2253     return true;
2254   }
2255 
2256   if (CI && CI->getZExtValue() % Base == 0) {
2257     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2258     return true;
2259   }
2260 
2261   if (Depth == MaxDepth) return false;  // Limit search depth.
2262 
2263   Operator *I = dyn_cast<Operator>(V);
2264   if (!I) return false;
2265 
2266   switch (I->getOpcode()) {
2267   default: break;
2268   case Instruction::SExt:
2269     if (!LookThroughSExt) return false;
2270     // otherwise fall through to ZExt
2271   case Instruction::ZExt:
2272     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2273                            LookThroughSExt, Depth+1);
2274   case Instruction::Shl:
2275   case Instruction::Mul: {
2276     Value *Op0 = I->getOperand(0);
2277     Value *Op1 = I->getOperand(1);
2278 
2279     if (I->getOpcode() == Instruction::Shl) {
2280       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2281       if (!Op1CI) return false;
2282       // Turn Op0 << Op1 into Op0 * 2^Op1
2283       APInt Op1Int = Op1CI->getValue();
2284       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2285       APInt API(Op1Int.getBitWidth(), 0);
2286       API.setBit(BitToSet);
2287       Op1 = ConstantInt::get(V->getContext(), API);
2288     }
2289 
2290     Value *Mul0 = nullptr;
2291     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2292       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2293         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2294           if (Op1C->getType()->getPrimitiveSizeInBits() <
2295               MulC->getType()->getPrimitiveSizeInBits())
2296             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2297           if (Op1C->getType()->getPrimitiveSizeInBits() >
2298               MulC->getType()->getPrimitiveSizeInBits())
2299             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2300 
2301           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2302           Multiple = ConstantExpr::getMul(MulC, Op1C);
2303           return true;
2304         }
2305 
2306       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2307         if (Mul0CI->getValue() == 1) {
2308           // V == Base * Op1, so return Op1
2309           Multiple = Op1;
2310           return true;
2311         }
2312     }
2313 
2314     Value *Mul1 = nullptr;
2315     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2316       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2317         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2318           if (Op0C->getType()->getPrimitiveSizeInBits() <
2319               MulC->getType()->getPrimitiveSizeInBits())
2320             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2321           if (Op0C->getType()->getPrimitiveSizeInBits() >
2322               MulC->getType()->getPrimitiveSizeInBits())
2323             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2324 
2325           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2326           Multiple = ConstantExpr::getMul(MulC, Op0C);
2327           return true;
2328         }
2329 
2330       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2331         if (Mul1CI->getValue() == 1) {
2332           // V == Base * Op0, so return Op0
2333           Multiple = Op0;
2334           return true;
2335         }
2336     }
2337   }
2338   }
2339 
2340   // We could not determine if V is a multiple of Base.
2341   return false;
2342 }
2343 
2344 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2345                                             const TargetLibraryInfo *TLI) {
2346   const Function *F = ICS.getCalledFunction();
2347   if (!F)
2348     return Intrinsic::not_intrinsic;
2349 
2350   if (F->isIntrinsic())
2351     return F->getIntrinsicID();
2352 
2353   if (!TLI)
2354     return Intrinsic::not_intrinsic;
2355 
2356   LibFunc::Func Func;
2357   // We're going to make assumptions on the semantics of the functions, check
2358   // that the target knows that it's available in this environment and it does
2359   // not have local linkage.
2360   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2361     return Intrinsic::not_intrinsic;
2362 
2363   if (!ICS.onlyReadsMemory())
2364     return Intrinsic::not_intrinsic;
2365 
2366   // Otherwise check if we have a call to a function that can be turned into a
2367   // vector intrinsic.
2368   switch (Func) {
2369   default:
2370     break;
2371   case LibFunc::sin:
2372   case LibFunc::sinf:
2373   case LibFunc::sinl:
2374     return Intrinsic::sin;
2375   case LibFunc::cos:
2376   case LibFunc::cosf:
2377   case LibFunc::cosl:
2378     return Intrinsic::cos;
2379   case LibFunc::exp:
2380   case LibFunc::expf:
2381   case LibFunc::expl:
2382     return Intrinsic::exp;
2383   case LibFunc::exp2:
2384   case LibFunc::exp2f:
2385   case LibFunc::exp2l:
2386     return Intrinsic::exp2;
2387   case LibFunc::log:
2388   case LibFunc::logf:
2389   case LibFunc::logl:
2390     return Intrinsic::log;
2391   case LibFunc::log10:
2392   case LibFunc::log10f:
2393   case LibFunc::log10l:
2394     return Intrinsic::log10;
2395   case LibFunc::log2:
2396   case LibFunc::log2f:
2397   case LibFunc::log2l:
2398     return Intrinsic::log2;
2399   case LibFunc::fabs:
2400   case LibFunc::fabsf:
2401   case LibFunc::fabsl:
2402     return Intrinsic::fabs;
2403   case LibFunc::fmin:
2404   case LibFunc::fminf:
2405   case LibFunc::fminl:
2406     return Intrinsic::minnum;
2407   case LibFunc::fmax:
2408   case LibFunc::fmaxf:
2409   case LibFunc::fmaxl:
2410     return Intrinsic::maxnum;
2411   case LibFunc::copysign:
2412   case LibFunc::copysignf:
2413   case LibFunc::copysignl:
2414     return Intrinsic::copysign;
2415   case LibFunc::floor:
2416   case LibFunc::floorf:
2417   case LibFunc::floorl:
2418     return Intrinsic::floor;
2419   case LibFunc::ceil:
2420   case LibFunc::ceilf:
2421   case LibFunc::ceill:
2422     return Intrinsic::ceil;
2423   case LibFunc::trunc:
2424   case LibFunc::truncf:
2425   case LibFunc::truncl:
2426     return Intrinsic::trunc;
2427   case LibFunc::rint:
2428   case LibFunc::rintf:
2429   case LibFunc::rintl:
2430     return Intrinsic::rint;
2431   case LibFunc::nearbyint:
2432   case LibFunc::nearbyintf:
2433   case LibFunc::nearbyintl:
2434     return Intrinsic::nearbyint;
2435   case LibFunc::round:
2436   case LibFunc::roundf:
2437   case LibFunc::roundl:
2438     return Intrinsic::round;
2439   case LibFunc::pow:
2440   case LibFunc::powf:
2441   case LibFunc::powl:
2442     return Intrinsic::pow;
2443   case LibFunc::sqrt:
2444   case LibFunc::sqrtf:
2445   case LibFunc::sqrtl:
2446     if (ICS->hasNoNaNs())
2447       return Intrinsic::sqrt;
2448     return Intrinsic::not_intrinsic;
2449   }
2450 
2451   return Intrinsic::not_intrinsic;
2452 }
2453 
2454 /// Return true if we can prove that the specified FP value is never equal to
2455 /// -0.0.
2456 ///
2457 /// NOTE: this function will need to be revisited when we support non-default
2458 /// rounding modes!
2459 ///
2460 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2461                                 unsigned Depth) {
2462   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2463     return !CFP->getValueAPF().isNegZero();
2464 
2465   // FIXME: Magic number! At the least, this should be given a name because it's
2466   // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
2467   // expose it as a parameter, so it can be used for testing / experimenting.
2468   if (Depth == 6)
2469     return false;  // Limit search depth.
2470 
2471   const Operator *I = dyn_cast<Operator>(V);
2472   if (!I) return false;
2473 
2474   // Check if the nsz fast-math flag is set
2475   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2476     if (FPO->hasNoSignedZeros())
2477       return true;
2478 
2479   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2480   if (I->getOpcode() == Instruction::FAdd)
2481     if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2482       if (CFP->isNullValue())
2483         return true;
2484 
2485   // sitofp and uitofp turn into +0.0 for zero.
2486   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2487     return true;
2488 
2489   if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2490     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2491     switch (IID) {
2492     default:
2493       break;
2494     // sqrt(-0.0) = -0.0, no other negative results are possible.
2495     case Intrinsic::sqrt:
2496       return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2497     // fabs(x) != -0.0
2498     case Intrinsic::fabs:
2499       return true;
2500     }
2501   }
2502 
2503   return false;
2504 }
2505 
2506 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2507                                        const TargetLibraryInfo *TLI,
2508                                        unsigned Depth) {
2509   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2510     return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
2511 
2512   // FIXME: Magic number! At the least, this should be given a name because it's
2513   // used similarly in CannotBeNegativeZero(). A better fix may be to
2514   // expose it as a parameter, so it can be used for testing / experimenting.
2515   if (Depth == 6)
2516     return false;  // Limit search depth.
2517 
2518   const Operator *I = dyn_cast<Operator>(V);
2519   if (!I) return false;
2520 
2521   switch (I->getOpcode()) {
2522   default: break;
2523   // Unsigned integers are always nonnegative.
2524   case Instruction::UIToFP:
2525     return true;
2526   case Instruction::FMul:
2527     // x*x is always non-negative or a NaN.
2528     if (I->getOperand(0) == I->getOperand(1))
2529       return true;
2530     // Fall through
2531   case Instruction::FAdd:
2532   case Instruction::FDiv:
2533   case Instruction::FRem:
2534     return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2535            CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2536   case Instruction::Select:
2537     return CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1) &&
2538            CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2539   case Instruction::FPExt:
2540   case Instruction::FPTrunc:
2541     // Widening/narrowing never change sign.
2542     return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2543   case Instruction::Call:
2544     Intrinsic::ID IID = getIntrinsicForCallSite(cast<CallInst>(I), TLI);
2545     switch (IID) {
2546     default:
2547       break;
2548     case Intrinsic::maxnum:
2549       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) ||
2550              CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2551     case Intrinsic::minnum:
2552       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2553              CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2554     case Intrinsic::exp:
2555     case Intrinsic::exp2:
2556     case Intrinsic::fabs:
2557     case Intrinsic::sqrt:
2558       return true;
2559     case Intrinsic::powi:
2560       if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2561         // powi(x,n) is non-negative if n is even.
2562         if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2563           return true;
2564       }
2565       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2566     case Intrinsic::fma:
2567     case Intrinsic::fmuladd:
2568       // x*x+y is non-negative if y is non-negative.
2569       return I->getOperand(0) == I->getOperand(1) &&
2570              CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2571     }
2572     break;
2573   }
2574   return false;
2575 }
2576 
2577 /// If the specified value can be set by repeating the same byte in memory,
2578 /// return the i8 value that it is represented with.  This is
2579 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2580 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2581 /// byte store (e.g. i16 0x1234), return null.
2582 Value *llvm::isBytewiseValue(Value *V) {
2583   // All byte-wide stores are splatable, even of arbitrary variables.
2584   if (V->getType()->isIntegerTy(8)) return V;
2585 
2586   // Handle 'null' ConstantArrayZero etc.
2587   if (Constant *C = dyn_cast<Constant>(V))
2588     if (C->isNullValue())
2589       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2590 
2591   // Constant float and double values can be handled as integer values if the
2592   // corresponding integer value is "byteable".  An important case is 0.0.
2593   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2594     if (CFP->getType()->isFloatTy())
2595       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2596     if (CFP->getType()->isDoubleTy())
2597       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2598     // Don't handle long double formats, which have strange constraints.
2599   }
2600 
2601   // We can handle constant integers that are multiple of 8 bits.
2602   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2603     if (CI->getBitWidth() % 8 == 0) {
2604       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2605 
2606       if (!CI->getValue().isSplat(8))
2607         return nullptr;
2608       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2609     }
2610   }
2611 
2612   // A ConstantDataArray/Vector is splatable if all its members are equal and
2613   // also splatable.
2614   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2615     Value *Elt = CA->getElementAsConstant(0);
2616     Value *Val = isBytewiseValue(Elt);
2617     if (!Val)
2618       return nullptr;
2619 
2620     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2621       if (CA->getElementAsConstant(I) != Elt)
2622         return nullptr;
2623 
2624     return Val;
2625   }
2626 
2627   // Conceptually, we could handle things like:
2628   //   %a = zext i8 %X to i16
2629   //   %b = shl i16 %a, 8
2630   //   %c = or i16 %a, %b
2631   // but until there is an example that actually needs this, it doesn't seem
2632   // worth worrying about.
2633   return nullptr;
2634 }
2635 
2636 
2637 // This is the recursive version of BuildSubAggregate. It takes a few different
2638 // arguments. Idxs is the index within the nested struct From that we are
2639 // looking at now (which is of type IndexedType). IdxSkip is the number of
2640 // indices from Idxs that should be left out when inserting into the resulting
2641 // struct. To is the result struct built so far, new insertvalue instructions
2642 // build on that.
2643 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2644                                 SmallVectorImpl<unsigned> &Idxs,
2645                                 unsigned IdxSkip,
2646                                 Instruction *InsertBefore) {
2647   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2648   if (STy) {
2649     // Save the original To argument so we can modify it
2650     Value *OrigTo = To;
2651     // General case, the type indexed by Idxs is a struct
2652     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2653       // Process each struct element recursively
2654       Idxs.push_back(i);
2655       Value *PrevTo = To;
2656       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2657                              InsertBefore);
2658       Idxs.pop_back();
2659       if (!To) {
2660         // Couldn't find any inserted value for this index? Cleanup
2661         while (PrevTo != OrigTo) {
2662           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2663           PrevTo = Del->getAggregateOperand();
2664           Del->eraseFromParent();
2665         }
2666         // Stop processing elements
2667         break;
2668       }
2669     }
2670     // If we successfully found a value for each of our subaggregates
2671     if (To)
2672       return To;
2673   }
2674   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2675   // the struct's elements had a value that was inserted directly. In the latter
2676   // case, perhaps we can't determine each of the subelements individually, but
2677   // we might be able to find the complete struct somewhere.
2678 
2679   // Find the value that is at that particular spot
2680   Value *V = FindInsertedValue(From, Idxs);
2681 
2682   if (!V)
2683     return nullptr;
2684 
2685   // Insert the value in the new (sub) aggregrate
2686   return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2687                                        "tmp", InsertBefore);
2688 }
2689 
2690 // This helper takes a nested struct and extracts a part of it (which is again a
2691 // struct) into a new value. For example, given the struct:
2692 // { a, { b, { c, d }, e } }
2693 // and the indices "1, 1" this returns
2694 // { c, d }.
2695 //
2696 // It does this by inserting an insertvalue for each element in the resulting
2697 // struct, as opposed to just inserting a single struct. This will only work if
2698 // each of the elements of the substruct are known (ie, inserted into From by an
2699 // insertvalue instruction somewhere).
2700 //
2701 // All inserted insertvalue instructions are inserted before InsertBefore
2702 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2703                                 Instruction *InsertBefore) {
2704   assert(InsertBefore && "Must have someplace to insert!");
2705   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2706                                                              idx_range);
2707   Value *To = UndefValue::get(IndexedType);
2708   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2709   unsigned IdxSkip = Idxs.size();
2710 
2711   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2712 }
2713 
2714 /// Given an aggregrate and an sequence of indices, see if
2715 /// the scalar value indexed is already around as a register, for example if it
2716 /// were inserted directly into the aggregrate.
2717 ///
2718 /// If InsertBefore is not null, this function will duplicate (modified)
2719 /// insertvalues when a part of a nested struct is extracted.
2720 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2721                                Instruction *InsertBefore) {
2722   // Nothing to index? Just return V then (this is useful at the end of our
2723   // recursion).
2724   if (idx_range.empty())
2725     return V;
2726   // We have indices, so V should have an indexable type.
2727   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2728          "Not looking at a struct or array?");
2729   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2730          "Invalid indices for type?");
2731 
2732   if (Constant *C = dyn_cast<Constant>(V)) {
2733     C = C->getAggregateElement(idx_range[0]);
2734     if (!C) return nullptr;
2735     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2736   }
2737 
2738   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2739     // Loop the indices for the insertvalue instruction in parallel with the
2740     // requested indices
2741     const unsigned *req_idx = idx_range.begin();
2742     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2743          i != e; ++i, ++req_idx) {
2744       if (req_idx == idx_range.end()) {
2745         // We can't handle this without inserting insertvalues
2746         if (!InsertBefore)
2747           return nullptr;
2748 
2749         // The requested index identifies a part of a nested aggregate. Handle
2750         // this specially. For example,
2751         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2752         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2753         // %C = extractvalue {i32, { i32, i32 } } %B, 1
2754         // This can be changed into
2755         // %A = insertvalue {i32, i32 } undef, i32 10, 0
2756         // %C = insertvalue {i32, i32 } %A, i32 11, 1
2757         // which allows the unused 0,0 element from the nested struct to be
2758         // removed.
2759         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2760                                  InsertBefore);
2761       }
2762 
2763       // This insert value inserts something else than what we are looking for.
2764       // See if the (aggregate) value inserted into has the value we are
2765       // looking for, then.
2766       if (*req_idx != *i)
2767         return FindInsertedValue(I->getAggregateOperand(), idx_range,
2768                                  InsertBefore);
2769     }
2770     // If we end up here, the indices of the insertvalue match with those
2771     // requested (though possibly only partially). Now we recursively look at
2772     // the inserted value, passing any remaining indices.
2773     return FindInsertedValue(I->getInsertedValueOperand(),
2774                              makeArrayRef(req_idx, idx_range.end()),
2775                              InsertBefore);
2776   }
2777 
2778   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2779     // If we're extracting a value from an aggregate that was extracted from
2780     // something else, we can extract from that something else directly instead.
2781     // However, we will need to chain I's indices with the requested indices.
2782 
2783     // Calculate the number of indices required
2784     unsigned size = I->getNumIndices() + idx_range.size();
2785     // Allocate some space to put the new indices in
2786     SmallVector<unsigned, 5> Idxs;
2787     Idxs.reserve(size);
2788     // Add indices from the extract value instruction
2789     Idxs.append(I->idx_begin(), I->idx_end());
2790 
2791     // Add requested indices
2792     Idxs.append(idx_range.begin(), idx_range.end());
2793 
2794     assert(Idxs.size() == size
2795            && "Number of indices added not correct?");
2796 
2797     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2798   }
2799   // Otherwise, we don't know (such as, extracting from a function return value
2800   // or load instruction)
2801   return nullptr;
2802 }
2803 
2804 /// Analyze the specified pointer to see if it can be expressed as a base
2805 /// pointer plus a constant offset. Return the base and offset to the caller.
2806 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2807                                               const DataLayout &DL) {
2808   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2809   APInt ByteOffset(BitWidth, 0);
2810 
2811   // We walk up the defs but use a visited set to handle unreachable code. In
2812   // that case, we stop after accumulating the cycle once (not that it
2813   // matters).
2814   SmallPtrSet<Value *, 16> Visited;
2815   while (Visited.insert(Ptr).second) {
2816     if (Ptr->getType()->isVectorTy())
2817       break;
2818 
2819     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2820       APInt GEPOffset(BitWidth, 0);
2821       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2822         break;
2823 
2824       ByteOffset += GEPOffset;
2825 
2826       Ptr = GEP->getPointerOperand();
2827     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2828                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2829       Ptr = cast<Operator>(Ptr)->getOperand(0);
2830     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2831       if (GA->isInterposable())
2832         break;
2833       Ptr = GA->getAliasee();
2834     } else {
2835       break;
2836     }
2837   }
2838   Offset = ByteOffset.getSExtValue();
2839   return Ptr;
2840 }
2841 
2842 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
2843   // Make sure the GEP has exactly three arguments.
2844   if (GEP->getNumOperands() != 3)
2845     return false;
2846 
2847   // Make sure the index-ee is a pointer to array of i8.
2848   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
2849   if (!AT || !AT->getElementType()->isIntegerTy(8))
2850     return false;
2851 
2852   // Check to make sure that the first operand of the GEP is an integer and
2853   // has value 0 so that we are sure we're indexing into the initializer.
2854   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2855   if (!FirstIdx || !FirstIdx->isZero())
2856     return false;
2857 
2858   return true;
2859 }
2860 
2861 /// This function computes the length of a null-terminated C string pointed to
2862 /// by V. If successful, it returns true and returns the string in Str.
2863 /// If unsuccessful, it returns false.
2864 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2865                                  uint64_t Offset, bool TrimAtNul) {
2866   assert(V);
2867 
2868   // Look through bitcast instructions and geps.
2869   V = V->stripPointerCasts();
2870 
2871   // If the value is a GEP instruction or constant expression, treat it as an
2872   // offset.
2873   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2874     // The GEP operator should be based on a pointer to string constant, and is
2875     // indexing into the string constant.
2876     if (!isGEPBasedOnPointerToString(GEP))
2877       return false;
2878 
2879     // If the second index isn't a ConstantInt, then this is a variable index
2880     // into the array.  If this occurs, we can't say anything meaningful about
2881     // the string.
2882     uint64_t StartIdx = 0;
2883     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
2884       StartIdx = CI->getZExtValue();
2885     else
2886       return false;
2887     return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
2888                                  TrimAtNul);
2889   }
2890 
2891   // The GEP instruction, constant or instruction, must reference a global
2892   // variable that is a constant and is initialized. The referenced constant
2893   // initializer is the array that we'll use for optimization.
2894   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2895   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
2896     return false;
2897 
2898   // Handle the all-zeros case.
2899   if (GV->getInitializer()->isNullValue()) {
2900     // This is a degenerate case. The initializer is constant zero so the
2901     // length of the string must be zero.
2902     Str = "";
2903     return true;
2904   }
2905 
2906   // This must be a ConstantDataArray.
2907   const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
2908   if (!Array || !Array->isString())
2909     return false;
2910 
2911   // Get the number of elements in the array.
2912   uint64_t NumElts = Array->getType()->getArrayNumElements();
2913 
2914   // Start out with the entire array in the StringRef.
2915   Str = Array->getAsString();
2916 
2917   if (Offset > NumElts)
2918     return false;
2919 
2920   // Skip over 'offset' bytes.
2921   Str = Str.substr(Offset);
2922 
2923   if (TrimAtNul) {
2924     // Trim off the \0 and anything after it.  If the array is not nul
2925     // terminated, we just return the whole end of string.  The client may know
2926     // some other way that the string is length-bound.
2927     Str = Str.substr(0, Str.find('\0'));
2928   }
2929   return true;
2930 }
2931 
2932 // These next two are very similar to the above, but also look through PHI
2933 // nodes.
2934 // TODO: See if we can integrate these two together.
2935 
2936 /// If we can compute the length of the string pointed to by
2937 /// the specified pointer, return 'len+1'.  If we can't, return 0.
2938 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
2939   // Look through noop bitcast instructions.
2940   V = V->stripPointerCasts();
2941 
2942   // If this is a PHI node, there are two cases: either we have already seen it
2943   // or we haven't.
2944   if (PHINode *PN = dyn_cast<PHINode>(V)) {
2945     if (!PHIs.insert(PN).second)
2946       return ~0ULL;  // already in the set.
2947 
2948     // If it was new, see if all the input strings are the same length.
2949     uint64_t LenSoFar = ~0ULL;
2950     for (Value *IncValue : PN->incoming_values()) {
2951       uint64_t Len = GetStringLengthH(IncValue, PHIs);
2952       if (Len == 0) return 0; // Unknown length -> unknown.
2953 
2954       if (Len == ~0ULL) continue;
2955 
2956       if (Len != LenSoFar && LenSoFar != ~0ULL)
2957         return 0;    // Disagree -> unknown.
2958       LenSoFar = Len;
2959     }
2960 
2961     // Success, all agree.
2962     return LenSoFar;
2963   }
2964 
2965   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
2966   if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
2967     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
2968     if (Len1 == 0) return 0;
2969     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
2970     if (Len2 == 0) return 0;
2971     if (Len1 == ~0ULL) return Len2;
2972     if (Len2 == ~0ULL) return Len1;
2973     if (Len1 != Len2) return 0;
2974     return Len1;
2975   }
2976 
2977   // Otherwise, see if we can read the string.
2978   StringRef StrData;
2979   if (!getConstantStringInfo(V, StrData))
2980     return 0;
2981 
2982   return StrData.size()+1;
2983 }
2984 
2985 /// If we can compute the length of the string pointed to by
2986 /// the specified pointer, return 'len+1'.  If we can't, return 0.
2987 uint64_t llvm::GetStringLength(Value *V) {
2988   if (!V->getType()->isPointerTy()) return 0;
2989 
2990   SmallPtrSet<PHINode*, 32> PHIs;
2991   uint64_t Len = GetStringLengthH(V, PHIs);
2992   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
2993   // an empty string as a length.
2994   return Len == ~0ULL ? 1 : Len;
2995 }
2996 
2997 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
2998 /// previous iteration of the loop was referring to the same object as \p PN.
2999 static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) {
3000   // Find the loop-defined value.
3001   Loop *L = LI->getLoopFor(PN->getParent());
3002   if (PN->getNumIncomingValues() != 2)
3003     return true;
3004 
3005   // Find the value from previous iteration.
3006   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3007   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3008     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3009   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3010     return true;
3011 
3012   // If a new pointer is loaded in the loop, the pointer references a different
3013   // object in every iteration.  E.g.:
3014   //    for (i)
3015   //       int *p = a[i];
3016   //       ...
3017   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3018     if (!L->isLoopInvariant(Load->getPointerOperand()))
3019       return false;
3020   return true;
3021 }
3022 
3023 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3024                                  unsigned MaxLookup) {
3025   if (!V->getType()->isPointerTy())
3026     return V;
3027   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3028     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3029       V = GEP->getPointerOperand();
3030     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3031                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3032       V = cast<Operator>(V)->getOperand(0);
3033     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3034       if (GA->isInterposable())
3035         return V;
3036       V = GA->getAliasee();
3037     } else {
3038       if (auto CS = CallSite(V))
3039         if (Value *RV = CS.getReturnedArgOperand()) {
3040           V = RV;
3041           continue;
3042         }
3043 
3044       // See if InstructionSimplify knows any relevant tricks.
3045       if (Instruction *I = dyn_cast<Instruction>(V))
3046         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3047         if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
3048           V = Simplified;
3049           continue;
3050         }
3051 
3052       return V;
3053     }
3054     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3055   }
3056   return V;
3057 }
3058 
3059 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3060                                 const DataLayout &DL, LoopInfo *LI,
3061                                 unsigned MaxLookup) {
3062   SmallPtrSet<Value *, 4> Visited;
3063   SmallVector<Value *, 4> Worklist;
3064   Worklist.push_back(V);
3065   do {
3066     Value *P = Worklist.pop_back_val();
3067     P = GetUnderlyingObject(P, DL, MaxLookup);
3068 
3069     if (!Visited.insert(P).second)
3070       continue;
3071 
3072     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3073       Worklist.push_back(SI->getTrueValue());
3074       Worklist.push_back(SI->getFalseValue());
3075       continue;
3076     }
3077 
3078     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3079       // If this PHI changes the underlying object in every iteration of the
3080       // loop, don't look through it.  Consider:
3081       //   int **A;
3082       //   for (i) {
3083       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3084       //     Curr = A[i];
3085       //     *Prev, *Curr;
3086       //
3087       // Prev is tracking Curr one iteration behind so they refer to different
3088       // underlying objects.
3089       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3090           isSameUnderlyingObjectInLoop(PN, LI))
3091         for (Value *IncValue : PN->incoming_values())
3092           Worklist.push_back(IncValue);
3093       continue;
3094     }
3095 
3096     Objects.push_back(P);
3097   } while (!Worklist.empty());
3098 }
3099 
3100 /// Return true if the only users of this pointer are lifetime markers.
3101 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3102   for (const User *U : V->users()) {
3103     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3104     if (!II) return false;
3105 
3106     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3107         II->getIntrinsicID() != Intrinsic::lifetime_end)
3108       return false;
3109   }
3110   return true;
3111 }
3112 
3113 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3114                                         const Instruction *CtxI,
3115                                         const DominatorTree *DT) {
3116   const Operator *Inst = dyn_cast<Operator>(V);
3117   if (!Inst)
3118     return false;
3119 
3120   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3121     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3122       if (C->canTrap())
3123         return false;
3124 
3125   switch (Inst->getOpcode()) {
3126   default:
3127     return true;
3128   case Instruction::UDiv:
3129   case Instruction::URem: {
3130     // x / y is undefined if y == 0.
3131     const APInt *V;
3132     if (match(Inst->getOperand(1), m_APInt(V)))
3133       return *V != 0;
3134     return false;
3135   }
3136   case Instruction::SDiv:
3137   case Instruction::SRem: {
3138     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3139     const APInt *Numerator, *Denominator;
3140     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3141       return false;
3142     // We cannot hoist this division if the denominator is 0.
3143     if (*Denominator == 0)
3144       return false;
3145     // It's safe to hoist if the denominator is not 0 or -1.
3146     if (*Denominator != -1)
3147       return true;
3148     // At this point we know that the denominator is -1.  It is safe to hoist as
3149     // long we know that the numerator is not INT_MIN.
3150     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3151       return !Numerator->isMinSignedValue();
3152     // The numerator *might* be MinSignedValue.
3153     return false;
3154   }
3155   case Instruction::Load: {
3156     const LoadInst *LI = cast<LoadInst>(Inst);
3157     if (!LI->isUnordered() ||
3158         // Speculative load may create a race that did not exist in the source.
3159         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3160         // Speculative load may load data from dirty regions.
3161         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
3162       return false;
3163     const DataLayout &DL = LI->getModule()->getDataLayout();
3164     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3165                                               LI->getAlignment(), DL, CtxI, DT);
3166   }
3167   case Instruction::Call: {
3168     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
3169       switch (II->getIntrinsicID()) {
3170       // These synthetic intrinsics have no side-effects and just mark
3171       // information about their operands.
3172       // FIXME: There are other no-op synthetic instructions that potentially
3173       // should be considered at least *safe* to speculate...
3174       case Intrinsic::dbg_declare:
3175       case Intrinsic::dbg_value:
3176         return true;
3177 
3178       case Intrinsic::bswap:
3179       case Intrinsic::ctlz:
3180       case Intrinsic::ctpop:
3181       case Intrinsic::cttz:
3182       case Intrinsic::objectsize:
3183       case Intrinsic::sadd_with_overflow:
3184       case Intrinsic::smul_with_overflow:
3185       case Intrinsic::ssub_with_overflow:
3186       case Intrinsic::uadd_with_overflow:
3187       case Intrinsic::umul_with_overflow:
3188       case Intrinsic::usub_with_overflow:
3189         return true;
3190       // These intrinsics are defined to have the same behavior as libm
3191       // functions except for setting errno.
3192       case Intrinsic::sqrt:
3193       case Intrinsic::fma:
3194       case Intrinsic::fmuladd:
3195         return true;
3196       // These intrinsics are defined to have the same behavior as libm
3197       // functions, and the corresponding libm functions never set errno.
3198       case Intrinsic::trunc:
3199       case Intrinsic::copysign:
3200       case Intrinsic::fabs:
3201       case Intrinsic::minnum:
3202       case Intrinsic::maxnum:
3203         return true;
3204       // These intrinsics are defined to have the same behavior as libm
3205       // functions, which never overflow when operating on the IEEE754 types
3206       // that we support, and never set errno otherwise.
3207       case Intrinsic::ceil:
3208       case Intrinsic::floor:
3209       case Intrinsic::nearbyint:
3210       case Intrinsic::rint:
3211       case Intrinsic::round:
3212         return true;
3213       // TODO: are convert_{from,to}_fp16 safe?
3214       // TODO: can we list target-specific intrinsics here?
3215       default: break;
3216       }
3217     }
3218     return false; // The called function could have undefined behavior or
3219                   // side-effects, even if marked readnone nounwind.
3220   }
3221   case Instruction::VAArg:
3222   case Instruction::Alloca:
3223   case Instruction::Invoke:
3224   case Instruction::PHI:
3225   case Instruction::Store:
3226   case Instruction::Ret:
3227   case Instruction::Br:
3228   case Instruction::IndirectBr:
3229   case Instruction::Switch:
3230   case Instruction::Unreachable:
3231   case Instruction::Fence:
3232   case Instruction::AtomicRMW:
3233   case Instruction::AtomicCmpXchg:
3234   case Instruction::LandingPad:
3235   case Instruction::Resume:
3236   case Instruction::CatchSwitch:
3237   case Instruction::CatchPad:
3238   case Instruction::CatchRet:
3239   case Instruction::CleanupPad:
3240   case Instruction::CleanupRet:
3241     return false; // Misc instructions which have effects
3242   }
3243 }
3244 
3245 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3246   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3247 }
3248 
3249 /// Return true if we know that the specified value is never null.
3250 bool llvm::isKnownNonNull(const Value *V) {
3251   assert(V->getType()->isPointerTy() && "V must be pointer type");
3252 
3253   // Alloca never returns null, malloc might.
3254   if (isa<AllocaInst>(V)) return true;
3255 
3256   // A byval, inalloca, or nonnull argument is never null.
3257   if (const Argument *A = dyn_cast<Argument>(V))
3258     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3259 
3260   // A global variable in address space 0 is non null unless extern weak.
3261   // Other address spaces may have null as a valid address for a global,
3262   // so we can't assume anything.
3263   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3264     return !GV->hasExternalWeakLinkage() &&
3265            GV->getType()->getAddressSpace() == 0;
3266 
3267   // A Load tagged with nonnull metadata is never null.
3268   if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3269     return LI->getMetadata(LLVMContext::MD_nonnull);
3270 
3271   if (auto CS = ImmutableCallSite(V))
3272     if (CS.isReturnNonNull())
3273       return true;
3274 
3275   return false;
3276 }
3277 
3278 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3279                                                   const Instruction *CtxI,
3280                                                   const DominatorTree *DT) {
3281   assert(V->getType()->isPointerTy() && "V must be pointer type");
3282 
3283   unsigned NumUsesExplored = 0;
3284   for (auto *U : V->users()) {
3285     // Avoid massive lists
3286     if (NumUsesExplored >= DomConditionsMaxUses)
3287       break;
3288     NumUsesExplored++;
3289     // Consider only compare instructions uniquely controlling a branch
3290     CmpInst::Predicate Pred;
3291     if (!match(const_cast<User *>(U),
3292                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
3293         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
3294       continue;
3295 
3296     for (auto *CmpU : U->users()) {
3297       if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
3298         assert(BI->isConditional() && "uses a comparison!");
3299 
3300         BasicBlock *NonNullSuccessor =
3301             BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
3302         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3303         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3304           return true;
3305       } else if (Pred == ICmpInst::ICMP_NE &&
3306                  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
3307                  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
3308         return true;
3309       }
3310     }
3311   }
3312 
3313   return false;
3314 }
3315 
3316 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3317                             const DominatorTree *DT) {
3318   if (isKnownNonNull(V))
3319     return true;
3320 
3321   return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
3322 }
3323 
3324 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
3325                                                    const DataLayout &DL,
3326                                                    AssumptionCache *AC,
3327                                                    const Instruction *CxtI,
3328                                                    const DominatorTree *DT) {
3329   // Multiplying n * m significant bits yields a result of n + m significant
3330   // bits. If the total number of significant bits does not exceed the
3331   // result bit width (minus 1), there is no overflow.
3332   // This means if we have enough leading zero bits in the operands
3333   // we can guarantee that the result does not overflow.
3334   // Ref: "Hacker's Delight" by Henry Warren
3335   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3336   APInt LHSKnownZero(BitWidth, 0);
3337   APInt LHSKnownOne(BitWidth, 0);
3338   APInt RHSKnownZero(BitWidth, 0);
3339   APInt RHSKnownOne(BitWidth, 0);
3340   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3341                    DT);
3342   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3343                    DT);
3344   // Note that underestimating the number of zero bits gives a more
3345   // conservative answer.
3346   unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3347                       RHSKnownZero.countLeadingOnes();
3348   // First handle the easy case: if we have enough zero bits there's
3349   // definitely no overflow.
3350   if (ZeroBits >= BitWidth)
3351     return OverflowResult::NeverOverflows;
3352 
3353   // Get the largest possible values for each operand.
3354   APInt LHSMax = ~LHSKnownZero;
3355   APInt RHSMax = ~RHSKnownZero;
3356 
3357   // We know the multiply operation doesn't overflow if the maximum values for
3358   // each operand will not overflow after we multiply them together.
3359   bool MaxOverflow;
3360   LHSMax.umul_ov(RHSMax, MaxOverflow);
3361   if (!MaxOverflow)
3362     return OverflowResult::NeverOverflows;
3363 
3364   // We know it always overflows if multiplying the smallest possible values for
3365   // the operands also results in overflow.
3366   bool MinOverflow;
3367   LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3368   if (MinOverflow)
3369     return OverflowResult::AlwaysOverflows;
3370 
3371   return OverflowResult::MayOverflow;
3372 }
3373 
3374 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3375                                                    const DataLayout &DL,
3376                                                    AssumptionCache *AC,
3377                                                    const Instruction *CxtI,
3378                                                    const DominatorTree *DT) {
3379   bool LHSKnownNonNegative, LHSKnownNegative;
3380   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3381                  AC, CxtI, DT);
3382   if (LHSKnownNonNegative || LHSKnownNegative) {
3383     bool RHSKnownNonNegative, RHSKnownNegative;
3384     ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3385                    AC, CxtI, DT);
3386 
3387     if (LHSKnownNegative && RHSKnownNegative) {
3388       // The sign bit is set in both cases: this MUST overflow.
3389       // Create a simple add instruction, and insert it into the struct.
3390       return OverflowResult::AlwaysOverflows;
3391     }
3392 
3393     if (LHSKnownNonNegative && RHSKnownNonNegative) {
3394       // The sign bit is clear in both cases: this CANNOT overflow.
3395       // Create a simple add instruction, and insert it into the struct.
3396       return OverflowResult::NeverOverflows;
3397     }
3398   }
3399 
3400   return OverflowResult::MayOverflow;
3401 }
3402 
3403 static OverflowResult computeOverflowForSignedAdd(
3404     Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
3405     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
3406   if (Add && Add->hasNoSignedWrap()) {
3407     return OverflowResult::NeverOverflows;
3408   }
3409 
3410   bool LHSKnownNonNegative, LHSKnownNegative;
3411   bool RHSKnownNonNegative, RHSKnownNegative;
3412   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3413                  AC, CxtI, DT);
3414   ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3415                  AC, CxtI, DT);
3416 
3417   if ((LHSKnownNonNegative && RHSKnownNegative) ||
3418       (LHSKnownNegative && RHSKnownNonNegative)) {
3419     // The sign bits are opposite: this CANNOT overflow.
3420     return OverflowResult::NeverOverflows;
3421   }
3422 
3423   // The remaining code needs Add to be available. Early returns if not so.
3424   if (!Add)
3425     return OverflowResult::MayOverflow;
3426 
3427   // If the sign of Add is the same as at least one of the operands, this add
3428   // CANNOT overflow. This is particularly useful when the sum is
3429   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3430   // operands.
3431   bool LHSOrRHSKnownNonNegative =
3432       (LHSKnownNonNegative || RHSKnownNonNegative);
3433   bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3434   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3435     bool AddKnownNonNegative, AddKnownNegative;
3436     ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3437                    /*Depth=*/0, AC, CxtI, DT);
3438     if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3439         (AddKnownNegative && LHSOrRHSKnownNegative)) {
3440       return OverflowResult::NeverOverflows;
3441     }
3442   }
3443 
3444   return OverflowResult::MayOverflow;
3445 }
3446 
3447 bool llvm::isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT) {
3448 #ifndef NDEBUG
3449   auto IID = II->getIntrinsicID();
3450   assert((IID == Intrinsic::sadd_with_overflow ||
3451           IID == Intrinsic::uadd_with_overflow ||
3452           IID == Intrinsic::ssub_with_overflow ||
3453           IID == Intrinsic::usub_with_overflow ||
3454           IID == Intrinsic::smul_with_overflow ||
3455           IID == Intrinsic::umul_with_overflow) &&
3456          "Not an overflow intrinsic!");
3457 #endif
3458 
3459   SmallVector<BranchInst *, 2> GuardingBranches;
3460   SmallVector<ExtractValueInst *, 2> Results;
3461 
3462   for (User *U : II->users()) {
3463     if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3464       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3465 
3466       if (EVI->getIndices()[0] == 0)
3467         Results.push_back(EVI);
3468       else {
3469         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3470 
3471         for (auto *U : EVI->users())
3472           if (auto *B = dyn_cast<BranchInst>(U)) {
3473             assert(B->isConditional() && "How else is it using an i1?");
3474             GuardingBranches.push_back(B);
3475           }
3476       }
3477     } else {
3478       // We are using the aggregate directly in a way we don't want to analyze
3479       // here (storing it to a global, say).
3480       return false;
3481     }
3482   }
3483 
3484   auto AllUsesGuardedByBranch = [&](BranchInst *BI) {
3485     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3486     if (!NoWrapEdge.isSingleEdge())
3487       return false;
3488 
3489     // Check if all users of the add are provably no-wrap.
3490     for (auto *Result : Results) {
3491       // If the extractvalue itself is not executed on overflow, the we don't
3492       // need to check each use separately, since domination is transitive.
3493       if (DT.dominates(NoWrapEdge, Result->getParent()))
3494         continue;
3495 
3496       for (auto &RU : Result->uses())
3497         if (!DT.dominates(NoWrapEdge, RU))
3498           return false;
3499     }
3500 
3501     return true;
3502   };
3503 
3504   return any_of(GuardingBranches, AllUsesGuardedByBranch);
3505 }
3506 
3507 
3508 OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
3509                                                  const DataLayout &DL,
3510                                                  AssumptionCache *AC,
3511                                                  const Instruction *CxtI,
3512                                                  const DominatorTree *DT) {
3513   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3514                                        Add, DL, AC, CxtI, DT);
3515 }
3516 
3517 OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
3518                                                  const DataLayout &DL,
3519                                                  AssumptionCache *AC,
3520                                                  const Instruction *CxtI,
3521                                                  const DominatorTree *DT) {
3522   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3523 }
3524 
3525 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3526   // A memory operation returns normally if it isn't volatile. A volatile
3527   // operation is allowed to trap.
3528   //
3529   // An atomic operation isn't guaranteed to return in a reasonable amount of
3530   // time because it's possible for another thread to interfere with it for an
3531   // arbitrary length of time, but programs aren't allowed to rely on that.
3532   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3533     return !LI->isVolatile();
3534   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3535     return !SI->isVolatile();
3536   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3537     return !CXI->isVolatile();
3538   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3539     return !RMWI->isVolatile();
3540   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3541     return !MII->isVolatile();
3542 
3543   // If there is no successor, then execution can't transfer to it.
3544   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3545     return !CRI->unwindsToCaller();
3546   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3547     return !CatchSwitch->unwindsToCaller();
3548   if (isa<ResumeInst>(I))
3549     return false;
3550   if (isa<ReturnInst>(I))
3551     return false;
3552 
3553   // Calls can throw, or contain an infinite loop, or kill the process.
3554   if (CallSite CS = CallSite(const_cast<Instruction*>(I))) {
3555     // Calls which don't write to arbitrary memory are safe.
3556     // FIXME: Ignoring infinite loops without any side-effects is too aggressive,
3557     // but it's consistent with other passes. See http://llvm.org/PR965 .
3558     // FIXME: This isn't aggressive enough; a call which only writes to a
3559     // global is guaranteed to return.
3560     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3561            match(I, m_Intrinsic<Intrinsic::assume>());
3562   }
3563 
3564   // Other instructions return normally.
3565   return true;
3566 }
3567 
3568 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3569                                                   const Loop *L) {
3570   // The loop header is guaranteed to be executed for every iteration.
3571   //
3572   // FIXME: Relax this constraint to cover all basic blocks that are
3573   // guaranteed to be executed at every iteration.
3574   if (I->getParent() != L->getHeader()) return false;
3575 
3576   for (const Instruction &LI : *L->getHeader()) {
3577     if (&LI == I) return true;
3578     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3579   }
3580   llvm_unreachable("Instruction not contained in its own parent basic block.");
3581 }
3582 
3583 bool llvm::propagatesFullPoison(const Instruction *I) {
3584   switch (I->getOpcode()) {
3585     case Instruction::Add:
3586     case Instruction::Sub:
3587     case Instruction::Xor:
3588     case Instruction::Trunc:
3589     case Instruction::BitCast:
3590     case Instruction::AddrSpaceCast:
3591       // These operations all propagate poison unconditionally. Note that poison
3592       // is not any particular value, so xor or subtraction of poison with
3593       // itself still yields poison, not zero.
3594       return true;
3595 
3596     case Instruction::AShr:
3597     case Instruction::SExt:
3598       // For these operations, one bit of the input is replicated across
3599       // multiple output bits. A replicated poison bit is still poison.
3600       return true;
3601 
3602     case Instruction::Shl: {
3603       // Left shift *by* a poison value is poison. The number of
3604       // positions to shift is unsigned, so no negative values are
3605       // possible there. Left shift by zero places preserves poison. So
3606       // it only remains to consider left shift of poison by a positive
3607       // number of places.
3608       //
3609       // A left shift by a positive number of places leaves the lowest order bit
3610       // non-poisoned. However, if such a shift has a no-wrap flag, then we can
3611       // make the poison operand violate that flag, yielding a fresh full-poison
3612       // value.
3613       auto *OBO = cast<OverflowingBinaryOperator>(I);
3614       return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
3615     }
3616 
3617     case Instruction::Mul: {
3618       // A multiplication by zero yields a non-poison zero result, so we need to
3619       // rule out zero as an operand. Conservatively, multiplication by a
3620       // non-zero constant is not multiplication by zero.
3621       //
3622       // Multiplication by a non-zero constant can leave some bits
3623       // non-poisoned. For example, a multiplication by 2 leaves the lowest
3624       // order bit unpoisoned. So we need to consider that.
3625       //
3626       // Multiplication by 1 preserves poison. If the multiplication has a
3627       // no-wrap flag, then we can make the poison operand violate that flag
3628       // when multiplied by any integer other than 0 and 1.
3629       auto *OBO = cast<OverflowingBinaryOperator>(I);
3630       if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
3631         for (Value *V : OBO->operands()) {
3632           if (auto *CI = dyn_cast<ConstantInt>(V)) {
3633             // A ConstantInt cannot yield poison, so we can assume that it is
3634             // the other operand that is poison.
3635             return !CI->isZero();
3636           }
3637         }
3638       }
3639       return false;
3640     }
3641 
3642     case Instruction::ICmp:
3643       // Comparing poison with any value yields poison.  This is why, for
3644       // instance, x s< (x +nsw 1) can be folded to true.
3645       return true;
3646 
3647     case Instruction::GetElementPtr:
3648       // A GEP implicitly represents a sequence of additions, subtractions,
3649       // truncations, sign extensions and multiplications. The multiplications
3650       // are by the non-zero sizes of some set of types, so we do not have to be
3651       // concerned with multiplication by zero. If the GEP is in-bounds, then
3652       // these operations are implicitly no-signed-wrap so poison is propagated
3653       // by the arguments above for Add, Sub, Trunc, SExt and Mul.
3654       return cast<GEPOperator>(I)->isInBounds();
3655 
3656     default:
3657       return false;
3658   }
3659 }
3660 
3661 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3662   switch (I->getOpcode()) {
3663     case Instruction::Store:
3664       return cast<StoreInst>(I)->getPointerOperand();
3665 
3666     case Instruction::Load:
3667       return cast<LoadInst>(I)->getPointerOperand();
3668 
3669     case Instruction::AtomicCmpXchg:
3670       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3671 
3672     case Instruction::AtomicRMW:
3673       return cast<AtomicRMWInst>(I)->getPointerOperand();
3674 
3675     case Instruction::UDiv:
3676     case Instruction::SDiv:
3677     case Instruction::URem:
3678     case Instruction::SRem:
3679       return I->getOperand(1);
3680 
3681     default:
3682       return nullptr;
3683   }
3684 }
3685 
3686 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
3687   // We currently only look for uses of poison values within the same basic
3688   // block, as that makes it easier to guarantee that the uses will be
3689   // executed given that PoisonI is executed.
3690   //
3691   // FIXME: Expand this to consider uses beyond the same basic block. To do
3692   // this, look out for the distinction between post-dominance and strong
3693   // post-dominance.
3694   const BasicBlock *BB = PoisonI->getParent();
3695 
3696   // Set of instructions that we have proved will yield poison if PoisonI
3697   // does.
3698   SmallSet<const Value *, 16> YieldsPoison;
3699   SmallSet<const BasicBlock *, 4> Visited;
3700   YieldsPoison.insert(PoisonI);
3701   Visited.insert(PoisonI->getParent());
3702 
3703   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
3704 
3705   unsigned Iter = 0;
3706   while (Iter++ < MaxDepth) {
3707     for (auto &I : make_range(Begin, End)) {
3708       if (&I != PoisonI) {
3709         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
3710         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
3711           return true;
3712         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
3713           return false;
3714       }
3715 
3716       // Mark poison that propagates from I through uses of I.
3717       if (YieldsPoison.count(&I)) {
3718         for (const User *User : I.users()) {
3719           const Instruction *UserI = cast<Instruction>(User);
3720           if (propagatesFullPoison(UserI))
3721             YieldsPoison.insert(User);
3722         }
3723       }
3724     }
3725 
3726     if (auto *NextBB = BB->getSingleSuccessor()) {
3727       if (Visited.insert(NextBB).second) {
3728         BB = NextBB;
3729         Begin = BB->getFirstNonPHI()->getIterator();
3730         End = BB->end();
3731         continue;
3732       }
3733     }
3734 
3735     break;
3736   };
3737   return false;
3738 }
3739 
3740 static bool isKnownNonNaN(Value *V, FastMathFlags FMF) {
3741   if (FMF.noNaNs())
3742     return true;
3743 
3744   if (auto *C = dyn_cast<ConstantFP>(V))
3745     return !C->isNaN();
3746   return false;
3747 }
3748 
3749 static bool isKnownNonZero(Value *V) {
3750   if (auto *C = dyn_cast<ConstantFP>(V))
3751     return !C->isZero();
3752   return false;
3753 }
3754 
3755 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
3756                                               FastMathFlags FMF,
3757                                               Value *CmpLHS, Value *CmpRHS,
3758                                               Value *TrueVal, Value *FalseVal,
3759                                               Value *&LHS, Value *&RHS) {
3760   LHS = CmpLHS;
3761   RHS = CmpRHS;
3762 
3763   // If the predicate is an "or-equal"  (FP) predicate, then signed zeroes may
3764   // return inconsistent results between implementations.
3765   //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
3766   //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
3767   // Therefore we behave conservatively and only proceed if at least one of the
3768   // operands is known to not be zero, or if we don't care about signed zeroes.
3769   switch (Pred) {
3770   default: break;
3771   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
3772   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
3773     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
3774         !isKnownNonZero(CmpRHS))
3775       return {SPF_UNKNOWN, SPNB_NA, false};
3776   }
3777 
3778   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
3779   bool Ordered = false;
3780 
3781   // When given one NaN and one non-NaN input:
3782   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
3783   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
3784   //     ordered comparison fails), which could be NaN or non-NaN.
3785   // so here we discover exactly what NaN behavior is required/accepted.
3786   if (CmpInst::isFPPredicate(Pred)) {
3787     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
3788     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
3789 
3790     if (LHSSafe && RHSSafe) {
3791       // Both operands are known non-NaN.
3792       NaNBehavior = SPNB_RETURNS_ANY;
3793     } else if (CmpInst::isOrdered(Pred)) {
3794       // An ordered comparison will return false when given a NaN, so it
3795       // returns the RHS.
3796       Ordered = true;
3797       if (LHSSafe)
3798         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
3799         NaNBehavior = SPNB_RETURNS_NAN;
3800       else if (RHSSafe)
3801         NaNBehavior = SPNB_RETURNS_OTHER;
3802       else
3803         // Completely unsafe.
3804         return {SPF_UNKNOWN, SPNB_NA, false};
3805     } else {
3806       Ordered = false;
3807       // An unordered comparison will return true when given a NaN, so it
3808       // returns the LHS.
3809       if (LHSSafe)
3810         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
3811         NaNBehavior = SPNB_RETURNS_OTHER;
3812       else if (RHSSafe)
3813         NaNBehavior = SPNB_RETURNS_NAN;
3814       else
3815         // Completely unsafe.
3816         return {SPF_UNKNOWN, SPNB_NA, false};
3817     }
3818   }
3819 
3820   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
3821     std::swap(CmpLHS, CmpRHS);
3822     Pred = CmpInst::getSwappedPredicate(Pred);
3823     if (NaNBehavior == SPNB_RETURNS_NAN)
3824       NaNBehavior = SPNB_RETURNS_OTHER;
3825     else if (NaNBehavior == SPNB_RETURNS_OTHER)
3826       NaNBehavior = SPNB_RETURNS_NAN;
3827     Ordered = !Ordered;
3828   }
3829 
3830   // ([if]cmp X, Y) ? X : Y
3831   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
3832     switch (Pred) {
3833     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
3834     case ICmpInst::ICMP_UGT:
3835     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
3836     case ICmpInst::ICMP_SGT:
3837     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
3838     case ICmpInst::ICMP_ULT:
3839     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
3840     case ICmpInst::ICMP_SLT:
3841     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
3842     case FCmpInst::FCMP_UGT:
3843     case FCmpInst::FCMP_UGE:
3844     case FCmpInst::FCMP_OGT:
3845     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
3846     case FCmpInst::FCMP_ULT:
3847     case FCmpInst::FCMP_ULE:
3848     case FCmpInst::FCMP_OLT:
3849     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
3850     }
3851   }
3852 
3853   if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
3854     if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
3855         (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
3856 
3857       // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
3858       // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
3859       if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
3860         return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3861       }
3862 
3863       // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
3864       // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
3865       if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
3866         return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3867       }
3868     }
3869 
3870     // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C)
3871     if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) {
3872       if (Pred == ICmpInst::ICMP_SGT && C1->getType() == C2->getType() &&
3873           ~C1->getValue() == C2->getValue() &&
3874           (match(TrueVal, m_Not(m_Specific(CmpLHS))) ||
3875            match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
3876         LHS = TrueVal;
3877         RHS = FalseVal;
3878         return {SPF_SMIN, SPNB_NA, false};
3879       }
3880     }
3881   }
3882 
3883   // TODO: (X > 4) ? X : 5   -->  (X >= 5) ? X : 5  -->  MAX(X, 5)
3884 
3885   return {SPF_UNKNOWN, SPNB_NA, false};
3886 }
3887 
3888 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
3889                               Instruction::CastOps *CastOp) {
3890   CastInst *CI = dyn_cast<CastInst>(V1);
3891   Constant *C = dyn_cast<Constant>(V2);
3892   if (!CI)
3893     return nullptr;
3894   *CastOp = CI->getOpcode();
3895 
3896   if (auto *CI2 = dyn_cast<CastInst>(V2)) {
3897     // If V1 and V2 are both the same cast from the same type, we can look
3898     // through V1.
3899     if (CI2->getOpcode() == CI->getOpcode() &&
3900         CI2->getSrcTy() == CI->getSrcTy())
3901       return CI2->getOperand(0);
3902     return nullptr;
3903   } else if (!C) {
3904     return nullptr;
3905   }
3906 
3907   Constant *CastedTo = nullptr;
3908 
3909   if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
3910     CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy());
3911 
3912   if (isa<SExtInst>(CI) && CmpI->isSigned())
3913     CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy(), true);
3914 
3915   if (isa<TruncInst>(CI))
3916     CastedTo = ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
3917 
3918   if (isa<FPTruncInst>(CI))
3919     CastedTo = ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);
3920 
3921   if (isa<FPExtInst>(CI))
3922     CastedTo = ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);
3923 
3924   if (isa<FPToUIInst>(CI))
3925     CastedTo = ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);
3926 
3927   if (isa<FPToSIInst>(CI))
3928     CastedTo = ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);
3929 
3930   if (isa<UIToFPInst>(CI))
3931     CastedTo = ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);
3932 
3933   if (isa<SIToFPInst>(CI))
3934     CastedTo = ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);
3935 
3936   if (!CastedTo)
3937     return nullptr;
3938 
3939   Constant *CastedBack =
3940       ConstantExpr::getCast(CI->getOpcode(), CastedTo, C->getType(), true);
3941   // Make sure the cast doesn't lose any information.
3942   if (CastedBack != C)
3943     return nullptr;
3944 
3945   return CastedTo;
3946 }
3947 
3948 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
3949                                              Instruction::CastOps *CastOp) {
3950   SelectInst *SI = dyn_cast<SelectInst>(V);
3951   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
3952 
3953   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
3954   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
3955 
3956   CmpInst::Predicate Pred = CmpI->getPredicate();
3957   Value *CmpLHS = CmpI->getOperand(0);
3958   Value *CmpRHS = CmpI->getOperand(1);
3959   Value *TrueVal = SI->getTrueValue();
3960   Value *FalseVal = SI->getFalseValue();
3961   FastMathFlags FMF;
3962   if (isa<FPMathOperator>(CmpI))
3963     FMF = CmpI->getFastMathFlags();
3964 
3965   // Bail out early.
3966   if (CmpI->isEquality())
3967     return {SPF_UNKNOWN, SPNB_NA, false};
3968 
3969   // Deal with type mismatches.
3970   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
3971     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
3972       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3973                                   cast<CastInst>(TrueVal)->getOperand(0), C,
3974                                   LHS, RHS);
3975     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
3976       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3977                                   C, cast<CastInst>(FalseVal)->getOperand(0),
3978                                   LHS, RHS);
3979   }
3980   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
3981                               LHS, RHS);
3982 }
3983 
3984 ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) {
3985   const unsigned NumRanges = Ranges.getNumOperands() / 2;
3986   assert(NumRanges >= 1 && "Must have at least one range!");
3987   assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
3988 
3989   auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0));
3990   auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1));
3991 
3992   ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue());
3993 
3994   for (unsigned i = 1; i < NumRanges; ++i) {
3995     auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
3996     auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
3997 
3998     // Note: unionWith will potentially create a range that contains values not
3999     // contained in any of the original N ranges.
4000     CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue()));
4001   }
4002 
4003   return CR;
4004 }
4005 
4006 /// Return true if "icmp Pred LHS RHS" is always true.
4007 static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
4008                             const DataLayout &DL, unsigned Depth,
4009                             AssumptionCache *AC, const Instruction *CxtI,
4010                             const DominatorTree *DT) {
4011   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4012   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4013     return true;
4014 
4015   switch (Pred) {
4016   default:
4017     return false;
4018 
4019   case CmpInst::ICMP_SLE: {
4020     const APInt *C;
4021 
4022     // LHS s<= LHS +_{nsw} C   if C >= 0
4023     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4024       return !C->isNegative();
4025     return false;
4026   }
4027 
4028   case CmpInst::ICMP_ULE: {
4029     const APInt *C;
4030 
4031     // LHS u<= LHS +_{nuw} C   for any C
4032     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4033       return true;
4034 
4035     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4036     auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X,
4037                                        const APInt *&CA, const APInt *&CB) {
4038       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4039           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4040         return true;
4041 
4042       // If X & C == 0 then (X | C) == X +_{nuw} C
4043       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4044           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4045         unsigned BitWidth = CA->getBitWidth();
4046         APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
4047         computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
4048 
4049         if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
4050           return true;
4051       }
4052 
4053       return false;
4054     };
4055 
4056     Value *X;
4057     const APInt *CLHS, *CRHS;
4058     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4059       return CLHS->ule(*CRHS);
4060 
4061     return false;
4062   }
4063   }
4064 }
4065 
4066 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4067 /// ALHS ARHS" is true.  Otherwise, return None.
4068 static Optional<bool>
4069 isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS, Value *ARHS,
4070                       Value *BLHS, Value *BRHS, const DataLayout &DL,
4071                       unsigned Depth, AssumptionCache *AC,
4072                       const Instruction *CxtI, const DominatorTree *DT) {
4073   switch (Pred) {
4074   default:
4075     return None;
4076 
4077   case CmpInst::ICMP_SLT:
4078   case CmpInst::ICMP_SLE:
4079     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
4080                         DT) &&
4081         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4082       return true;
4083     return None;
4084 
4085   case CmpInst::ICMP_ULT:
4086   case CmpInst::ICMP_ULE:
4087     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
4088                         DT) &&
4089         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4090       return true;
4091     return None;
4092   }
4093 }
4094 
4095 /// Return true if the operands of the two compares match.  IsSwappedOps is true
4096 /// when the operands match, but are swapped.
4097 static bool isMatchingOps(Value *ALHS, Value *ARHS, Value *BLHS, Value *BRHS,
4098                           bool &IsSwappedOps) {
4099 
4100   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4101   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4102   return IsMatchingOps || IsSwappedOps;
4103 }
4104 
4105 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4106 /// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4107 /// BRHS" is false.  Otherwise, return None if we can't infer anything.
4108 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4109                                                     Value *ALHS, Value *ARHS,
4110                                                     CmpInst::Predicate BPred,
4111                                                     Value *BLHS, Value *BRHS,
4112                                                     bool IsSwappedOps) {
4113   // Canonicalize the operands so they're matching.
4114   if (IsSwappedOps) {
4115     std::swap(BLHS, BRHS);
4116     BPred = ICmpInst::getSwappedPredicate(BPred);
4117   }
4118   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4119     return true;
4120   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4121     return false;
4122 
4123   return None;
4124 }
4125 
4126 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4127 /// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4128 /// C2" is false.  Otherwise, return None if we can't infer anything.
4129 static Optional<bool>
4130 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, Value *ALHS,
4131                                  ConstantInt *C1, CmpInst::Predicate BPred,
4132                                  Value *BLHS, ConstantInt *C2) {
4133   assert(ALHS == BLHS && "LHS operands must match.");
4134   ConstantRange DomCR =
4135       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4136   ConstantRange CR =
4137       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4138   ConstantRange Intersection = DomCR.intersectWith(CR);
4139   ConstantRange Difference = DomCR.difference(CR);
4140   if (Intersection.isEmptySet())
4141     return false;
4142   if (Difference.isEmptySet())
4143     return true;
4144   return None;
4145 }
4146 
4147 Optional<bool> llvm::isImpliedCondition(Value *LHS, Value *RHS,
4148                                         const DataLayout &DL, bool InvertAPred,
4149                                         unsigned Depth, AssumptionCache *AC,
4150                                         const Instruction *CxtI,
4151                                         const DominatorTree *DT) {
4152   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
4153   if (LHS->getType() != RHS->getType())
4154     return None;
4155 
4156   Type *OpTy = LHS->getType();
4157   assert(OpTy->getScalarType()->isIntegerTy(1));
4158 
4159   // LHS ==> RHS by definition
4160   if (!InvertAPred && LHS == RHS)
4161     return true;
4162 
4163   if (OpTy->isVectorTy())
4164     // TODO: extending the code below to handle vectors
4165     return None;
4166   assert(OpTy->isIntegerTy(1) && "implied by above");
4167 
4168   ICmpInst::Predicate APred, BPred;
4169   Value *ALHS, *ARHS;
4170   Value *BLHS, *BRHS;
4171 
4172   if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
4173       !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
4174     return None;
4175 
4176   if (InvertAPred)
4177     APred = CmpInst::getInversePredicate(APred);
4178 
4179   // Can we infer anything when the two compares have matching operands?
4180   bool IsSwappedOps;
4181   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4182     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4183             APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4184       return Implication;
4185     // No amount of additional analysis will infer the second condition, so
4186     // early exit.
4187     return None;
4188   }
4189 
4190   // Can we infer anything when the LHS operands match and the RHS operands are
4191   // constants (not necessarily matching)?
4192   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4193     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4194             APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4195             cast<ConstantInt>(BRHS)))
4196       return Implication;
4197     // No amount of additional analysis will infer the second condition, so
4198     // early exit.
4199     return None;
4200   }
4201 
4202   if (APred == BPred)
4203     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
4204                                  CxtI, DT);
4205 
4206   return None;
4207 }
4208