1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/MemoryBuiltins.h"
21 #include "llvm/Analysis/Loads.h"
22 #include "llvm/Analysis/LoopInfo.h"
23 #include "llvm/Analysis/VectorUtils.h"
24 #include "llvm/IR/CallSite.h"
25 #include "llvm/IR/ConstantRange.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/Dominators.h"
29 #include "llvm/IR/GetElementPtrTypeIterator.h"
30 #include "llvm/IR/GlobalAlias.h"
31 #include "llvm/IR/GlobalVariable.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/LLVMContext.h"
35 #include "llvm/IR/Metadata.h"
36 #include "llvm/IR/Operator.h"
37 #include "llvm/IR/PatternMatch.h"
38 #include "llvm/IR/Statepoint.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/MathExtras.h"
41 #include <algorithm>
42 #include <array>
43 #include <cstring>
44 using namespace llvm;
45 using namespace llvm::PatternMatch;
46 
47 const unsigned MaxDepth = 6;
48 
49 // Controls the number of uses of the value searched for possible
50 // dominating comparisons.
51 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
52                                               cl::Hidden, cl::init(20));
53 
54 /// Returns the bitwidth of the given scalar or pointer type (if unknown returns
55 /// 0). For vector types, returns the element type's bitwidth.
56 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
57   if (unsigned BitWidth = Ty->getScalarSizeInBits())
58     return BitWidth;
59 
60   return DL.getPointerTypeSizeInBits(Ty);
61 }
62 
63 namespace {
64 // Simplifying using an assume can only be done in a particular control-flow
65 // context (the context instruction provides that context). If an assume and
66 // the context instruction are not in the same block then the DT helps in
67 // figuring out if we can use it.
68 struct Query {
69   const DataLayout &DL;
70   AssumptionCache *AC;
71   const Instruction *CxtI;
72   const DominatorTree *DT;
73 
74   /// Set of assumptions that should be excluded from further queries.
75   /// This is because of the potential for mutual recursion to cause
76   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
77   /// classic case of this is assume(x = y), which will attempt to determine
78   /// bits in x from bits in y, which will attempt to determine bits in y from
79   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
80   /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
81   /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
82   /// on.
83   std::array<const Value*, MaxDepth> Excluded;
84   unsigned NumExcluded;
85 
86   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
87         const DominatorTree *DT)
88       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}
89 
90   Query(const Query &Q, const Value *NewExcl)
91       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
92     Excluded = Q.Excluded;
93     Excluded[NumExcluded++] = NewExcl;
94     assert(NumExcluded <= Excluded.size());
95   }
96 
97   bool isExcluded(const Value *Value) const {
98     if (NumExcluded == 0)
99       return false;
100     auto End = Excluded.begin() + NumExcluded;
101     return std::find(Excluded.begin(), End, Value) != End;
102   }
103 };
104 } // end anonymous namespace
105 
106 // Given the provided Value and, potentially, a context instruction, return
107 // the preferred context instruction (if any).
108 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
109   // If we've been provided with a context instruction, then use that (provided
110   // it has been inserted).
111   if (CxtI && CxtI->getParent())
112     return CxtI;
113 
114   // If the value is really an already-inserted instruction, then use that.
115   CxtI = dyn_cast<Instruction>(V);
116   if (CxtI && CxtI->getParent())
117     return CxtI;
118 
119   return nullptr;
120 }
121 
122 static void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
123                              unsigned Depth, const Query &Q);
124 
125 void llvm::computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
126                             const DataLayout &DL, unsigned Depth,
127                             AssumptionCache *AC, const Instruction *CxtI,
128                             const DominatorTree *DT) {
129   ::computeKnownBits(V, KnownZero, KnownOne, Depth,
130                      Query(DL, AC, safeCxtI(V, CxtI), DT));
131 }
132 
133 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
134                                const DataLayout &DL,
135                                AssumptionCache *AC, const Instruction *CxtI,
136                                const DominatorTree *DT) {
137   assert(LHS->getType() == RHS->getType() &&
138          "LHS and RHS should have the same type");
139   assert(LHS->getType()->isIntOrIntVectorTy() &&
140          "LHS and RHS should be integers");
141   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
142   APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
143   APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
144   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
145   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
146   return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
147 }
148 
149 static void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
150                            unsigned Depth, const Query &Q);
151 
152 void llvm::ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
153                           const DataLayout &DL, unsigned Depth,
154                           AssumptionCache *AC, const Instruction *CxtI,
155                           const DominatorTree *DT) {
156   ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
157                    Query(DL, AC, safeCxtI(V, CxtI), DT));
158 }
159 
160 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
161                                    const Query &Q);
162 
163 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
164                                   bool OrZero,
165                                   unsigned Depth, AssumptionCache *AC,
166                                   const Instruction *CxtI,
167                                   const DominatorTree *DT) {
168   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
169                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
170 }
171 
172 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
173 
174 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
175                           AssumptionCache *AC, const Instruction *CxtI,
176                           const DominatorTree *DT) {
177   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
178 }
179 
180 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
181                               unsigned Depth,
182                               AssumptionCache *AC, const Instruction *CxtI,
183                               const DominatorTree *DT) {
184   bool NonNegative, Negative;
185   ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
186   return NonNegative;
187 }
188 
189 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
190                            AssumptionCache *AC, const Instruction *CxtI,
191                            const DominatorTree *DT) {
192   if (auto *CI = dyn_cast<ConstantInt>(V))
193     return CI->getValue().isStrictlyPositive();
194 
195   // TODO: We'd doing two recursive queries here.  We should factor this such
196   // that only a single query is needed.
197   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
198     isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
199 }
200 
201 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
202                            AssumptionCache *AC, const Instruction *CxtI,
203                            const DominatorTree *DT) {
204   bool NonNegative, Negative;
205   ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
206   return Negative;
207 }
208 
209 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
210 
211 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
212                            const DataLayout &DL,
213                            AssumptionCache *AC, const Instruction *CxtI,
214                            const DominatorTree *DT) {
215   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
216                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
217                                          DT));
218 }
219 
220 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
221                               const Query &Q);
222 
223 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
224                              const DataLayout &DL,
225                              unsigned Depth, AssumptionCache *AC,
226                              const Instruction *CxtI, const DominatorTree *DT) {
227   return ::MaskedValueIsZero(V, Mask, Depth,
228                              Query(DL, AC, safeCxtI(V, CxtI), DT));
229 }
230 
231 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
232                                    const Query &Q);
233 
234 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
235                                   unsigned Depth, AssumptionCache *AC,
236                                   const Instruction *CxtI,
237                                   const DominatorTree *DT) {
238   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
239 }
240 
241 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
242                                    bool NSW,
243                                    APInt &KnownZero, APInt &KnownOne,
244                                    APInt &KnownZero2, APInt &KnownOne2,
245                                    unsigned Depth, const Query &Q) {
246   if (!Add) {
247     if (const ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
248       // We know that the top bits of C-X are clear if X contains less bits
249       // than C (i.e. no wrap-around can happen).  For example, 20-X is
250       // positive if we can prove that X is >= 0 and < 16.
251       if (!CLHS->getValue().isNegative()) {
252         unsigned BitWidth = KnownZero.getBitWidth();
253         unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
254         // NLZ can't be BitWidth with no sign bit
255         APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
256         computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
257 
258         // If all of the MaskV bits are known to be zero, then we know the
259         // output top bits are zero, because we now know that the output is
260         // from [0-C].
261         if ((KnownZero2 & MaskV) == MaskV) {
262           unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
263           // Top bits known zero.
264           KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
265         }
266       }
267     }
268   }
269 
270   unsigned BitWidth = KnownZero.getBitWidth();
271 
272   // If an initial sequence of bits in the result is not needed, the
273   // corresponding bits in the operands are not needed.
274   APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
275   computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
276   computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
277 
278   // Carry in a 1 for a subtract, rather than a 0.
279   APInt CarryIn(BitWidth, 0);
280   if (!Add) {
281     // Sum = LHS + ~RHS + 1
282     std::swap(KnownZero2, KnownOne2);
283     CarryIn.setBit(0);
284   }
285 
286   APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
287   APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
288 
289   // Compute known bits of the carry.
290   APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
291   APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;
292 
293   // Compute set of known bits (where all three relevant bits are known).
294   APInt LHSKnown = LHSKnownZero | LHSKnownOne;
295   APInt RHSKnown = KnownZero2 | KnownOne2;
296   APInt CarryKnown = CarryKnownZero | CarryKnownOne;
297   APInt Known = LHSKnown & RHSKnown & CarryKnown;
298 
299   assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
300          "known bits of sum differ");
301 
302   // Compute known bits of the result.
303   KnownZero = ~PossibleSumOne & Known;
304   KnownOne = PossibleSumOne & Known;
305 
306   // Are we still trying to solve for the sign bit?
307   if (!Known.isNegative()) {
308     if (NSW) {
309       // Adding two non-negative numbers, or subtracting a negative number from
310       // a non-negative one, can't wrap into negative.
311       if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
312         KnownZero |= APInt::getSignBit(BitWidth);
313       // Adding two negative numbers, or subtracting a non-negative number from
314       // a negative one, can't wrap into non-negative.
315       else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
316         KnownOne |= APInt::getSignBit(BitWidth);
317     }
318   }
319 }
320 
321 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
322                                 APInt &KnownZero, APInt &KnownOne,
323                                 APInt &KnownZero2, APInt &KnownOne2,
324                                 unsigned Depth, const Query &Q) {
325   unsigned BitWidth = KnownZero.getBitWidth();
326   computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
327   computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);
328 
329   bool isKnownNegative = false;
330   bool isKnownNonNegative = false;
331   // If the multiplication is known not to overflow, compute the sign bit.
332   if (NSW) {
333     if (Op0 == Op1) {
334       // The product of a number with itself is non-negative.
335       isKnownNonNegative = true;
336     } else {
337       bool isKnownNonNegativeOp1 = KnownZero.isNegative();
338       bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
339       bool isKnownNegativeOp1 = KnownOne.isNegative();
340       bool isKnownNegativeOp0 = KnownOne2.isNegative();
341       // The product of two numbers with the same sign is non-negative.
342       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
343         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
344       // The product of a negative number and a non-negative number is either
345       // negative or zero.
346       if (!isKnownNonNegative)
347         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
348                            isKnownNonZero(Op0, Depth, Q)) ||
349                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
350                            isKnownNonZero(Op1, Depth, Q));
351     }
352   }
353 
354   // If low bits are zero in either operand, output low known-0 bits.
355   // Also compute a conservative estimate for high known-0 bits.
356   // More trickiness is possible, but this is sufficient for the
357   // interesting case of alignment computation.
358   KnownOne.clearAllBits();
359   unsigned TrailZ = KnownZero.countTrailingOnes() +
360                     KnownZero2.countTrailingOnes();
361   unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
362                              KnownZero2.countLeadingOnes(),
363                              BitWidth) - BitWidth;
364 
365   TrailZ = std::min(TrailZ, BitWidth);
366   LeadZ = std::min(LeadZ, BitWidth);
367   KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
368               APInt::getHighBitsSet(BitWidth, LeadZ);
369 
370   // Only make use of no-wrap flags if we failed to compute the sign bit
371   // directly.  This matters if the multiplication always overflows, in
372   // which case we prefer to follow the result of the direct computation,
373   // though as the program is invoking undefined behaviour we can choose
374   // whatever we like here.
375   if (isKnownNonNegative && !KnownOne.isNegative())
376     KnownZero.setBit(BitWidth - 1);
377   else if (isKnownNegative && !KnownZero.isNegative())
378     KnownOne.setBit(BitWidth - 1);
379 }
380 
381 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
382                                              APInt &KnownZero,
383                                              APInt &KnownOne) {
384   unsigned BitWidth = KnownZero.getBitWidth();
385   unsigned NumRanges = Ranges.getNumOperands() / 2;
386   assert(NumRanges >= 1);
387 
388   KnownZero.setAllBits();
389   KnownOne.setAllBits();
390 
391   for (unsigned i = 0; i < NumRanges; ++i) {
392     ConstantInt *Lower =
393         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
394     ConstantInt *Upper =
395         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
396     ConstantRange Range(Lower->getValue(), Upper->getValue());
397 
398     // The first CommonPrefixBits of all values in Range are equal.
399     unsigned CommonPrefixBits =
400         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
401 
402     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
403     KnownOne &= Range.getUnsignedMax() & Mask;
404     KnownZero &= ~Range.getUnsignedMax() & Mask;
405   }
406 }
407 
408 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
409   SmallVector<const Value *, 16> WorkSet(1, I);
410   SmallPtrSet<const Value *, 32> Visited;
411   SmallPtrSet<const Value *, 16> EphValues;
412 
413   // The instruction defining an assumption's condition itself is always
414   // considered ephemeral to that assumption (even if it has other
415   // non-ephemeral users). See r246696's test case for an example.
416   if (is_contained(I->operands(), E))
417     return true;
418 
419   while (!WorkSet.empty()) {
420     const Value *V = WorkSet.pop_back_val();
421     if (!Visited.insert(V).second)
422       continue;
423 
424     // If all uses of this value are ephemeral, then so is this value.
425     if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
426       if (V == E)
427         return true;
428 
429       EphValues.insert(V);
430       if (const User *U = dyn_cast<User>(V))
431         for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
432              J != JE; ++J) {
433           if (isSafeToSpeculativelyExecute(*J))
434             WorkSet.push_back(*J);
435         }
436     }
437   }
438 
439   return false;
440 }
441 
442 // Is this an intrinsic that cannot be speculated but also cannot trap?
443 static bool isAssumeLikeIntrinsic(const Instruction *I) {
444   if (const CallInst *CI = dyn_cast<CallInst>(I))
445     if (Function *F = CI->getCalledFunction())
446       switch (F->getIntrinsicID()) {
447       default: break;
448       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
449       case Intrinsic::assume:
450       case Intrinsic::dbg_declare:
451       case Intrinsic::dbg_value:
452       case Intrinsic::invariant_start:
453       case Intrinsic::invariant_end:
454       case Intrinsic::lifetime_start:
455       case Intrinsic::lifetime_end:
456       case Intrinsic::objectsize:
457       case Intrinsic::ptr_annotation:
458       case Intrinsic::var_annotation:
459         return true;
460       }
461 
462   return false;
463 }
464 
465 bool llvm::isValidAssumeForContext(const Instruction *Inv,
466                                    const Instruction *CxtI,
467                                    const DominatorTree *DT) {
468 
469   // There are two restrictions on the use of an assume:
470   //  1. The assume must dominate the context (or the control flow must
471   //     reach the assume whenever it reaches the context).
472   //  2. The context must not be in the assume's set of ephemeral values
473   //     (otherwise we will use the assume to prove that the condition
474   //     feeding the assume is trivially true, thus causing the removal of
475   //     the assume).
476 
477   if (DT) {
478     if (DT->dominates(Inv, CxtI))
479       return true;
480   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
481     // We don't have a DT, but this trivially dominates.
482     return true;
483   }
484 
485   // With or without a DT, the only remaining case we will check is if the
486   // instructions are in the same BB.  Give up if that is not the case.
487   if (Inv->getParent() != CxtI->getParent())
488     return false;
489 
490   // If we have a dom tree, then we now know that the assume doens't dominate
491   // the other instruction.  If we don't have a dom tree then we can check if
492   // the assume is first in the BB.
493   if (!DT) {
494     // Search forward from the assume until we reach the context (or the end
495     // of the block); the common case is that the assume will come first.
496     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
497          IE = Inv->getParent()->end(); I != IE; ++I)
498       if (&*I == CxtI)
499         return true;
500   }
501 
502   // The context comes first, but they're both in the same block. Make sure
503   // there is nothing in between that might interrupt the control flow.
504   for (BasicBlock::const_iterator I =
505          std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
506        I != IE; ++I)
507     if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
508       return false;
509 
510   return !isEphemeralValueOf(Inv, CxtI);
511 }
512 
513 static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
514                                        APInt &KnownOne, unsigned Depth,
515                                        const Query &Q) {
516   // Use of assumptions is context-sensitive. If we don't have a context, we
517   // cannot use them!
518   if (!Q.AC || !Q.CxtI)
519     return;
520 
521   unsigned BitWidth = KnownZero.getBitWidth();
522 
523   for (auto &AssumeVH : Q.AC->assumptions()) {
524     if (!AssumeVH)
525       continue;
526     CallInst *I = cast<CallInst>(AssumeVH);
527     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
528            "Got assumption for the wrong function!");
529     if (Q.isExcluded(I))
530       continue;
531 
532     // Warning: This loop can end up being somewhat performance sensetive.
533     // We're running this loop for once for each value queried resulting in a
534     // runtime of ~O(#assumes * #values).
535 
536     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
537            "must be an assume intrinsic");
538 
539     Value *Arg = I->getArgOperand(0);
540 
541     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
542       assert(BitWidth == 1 && "assume operand is not i1?");
543       KnownZero.clearAllBits();
544       KnownOne.setAllBits();
545       return;
546     }
547 
548     // The remaining tests are all recursive, so bail out if we hit the limit.
549     if (Depth == MaxDepth)
550       continue;
551 
552     Value *A, *B;
553     auto m_V = m_CombineOr(m_Specific(V),
554                            m_CombineOr(m_PtrToInt(m_Specific(V)),
555                            m_BitCast(m_Specific(V))));
556 
557     CmpInst::Predicate Pred;
558     ConstantInt *C;
559     // assume(v = a)
560     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
561         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
562       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
563       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
564       KnownZero |= RHSKnownZero;
565       KnownOne  |= RHSKnownOne;
566     // assume(v & b = a)
567     } else if (match(Arg,
568                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
569                Pred == ICmpInst::ICMP_EQ &&
570                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
571       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
572       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
573       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
574       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
575 
576       // For those bits in the mask that are known to be one, we can propagate
577       // known bits from the RHS to V.
578       KnownZero |= RHSKnownZero & MaskKnownOne;
579       KnownOne  |= RHSKnownOne  & MaskKnownOne;
580     // assume(~(v & b) = a)
581     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
582                                    m_Value(A))) &&
583                Pred == ICmpInst::ICMP_EQ &&
584                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
585       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
586       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
587       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
588       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
589 
590       // For those bits in the mask that are known to be one, we can propagate
591       // inverted known bits from the RHS to V.
592       KnownZero |= RHSKnownOne  & MaskKnownOne;
593       KnownOne  |= RHSKnownZero & MaskKnownOne;
594     // assume(v | b = a)
595     } else if (match(Arg,
596                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
597                Pred == ICmpInst::ICMP_EQ &&
598                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
599       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
600       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
601       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
602       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
603 
604       // For those bits in B that are known to be zero, we can propagate known
605       // bits from the RHS to V.
606       KnownZero |= RHSKnownZero & BKnownZero;
607       KnownOne  |= RHSKnownOne  & BKnownZero;
608     // assume(~(v | b) = a)
609     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
610                                    m_Value(A))) &&
611                Pred == ICmpInst::ICMP_EQ &&
612                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
613       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
614       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
615       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
616       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
617 
618       // For those bits in B that are known to be zero, we can propagate
619       // inverted known bits from the RHS to V.
620       KnownZero |= RHSKnownOne  & BKnownZero;
621       KnownOne  |= RHSKnownZero & BKnownZero;
622     // assume(v ^ b = a)
623     } else if (match(Arg,
624                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
625                Pred == ICmpInst::ICMP_EQ &&
626                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
627       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
628       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
629       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
630       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
631 
632       // For those bits in B that are known to be zero, we can propagate known
633       // bits from the RHS to V. For those bits in B that are known to be one,
634       // we can propagate inverted known bits from the RHS to V.
635       KnownZero |= RHSKnownZero & BKnownZero;
636       KnownOne  |= RHSKnownOne  & BKnownZero;
637       KnownZero |= RHSKnownOne  & BKnownOne;
638       KnownOne  |= RHSKnownZero & BKnownOne;
639     // assume(~(v ^ b) = a)
640     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
641                                    m_Value(A))) &&
642                Pred == ICmpInst::ICMP_EQ &&
643                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
644       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
645       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
646       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
647       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
648 
649       // For those bits in B that are known to be zero, we can propagate
650       // inverted known bits from the RHS to V. For those bits in B that are
651       // known to be one, we can propagate known bits from the RHS to V.
652       KnownZero |= RHSKnownOne  & BKnownZero;
653       KnownOne  |= RHSKnownZero & BKnownZero;
654       KnownZero |= RHSKnownZero & BKnownOne;
655       KnownOne  |= RHSKnownOne  & BKnownOne;
656     // assume(v << c = a)
657     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
658                                    m_Value(A))) &&
659                Pred == ICmpInst::ICMP_EQ &&
660                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
661       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
662       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
663       // For those bits in RHS that are known, we can propagate them to known
664       // bits in V shifted to the right by C.
665       KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
666       KnownOne  |= RHSKnownOne.lshr(C->getZExtValue());
667     // assume(~(v << c) = a)
668     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
669                                    m_Value(A))) &&
670                Pred == ICmpInst::ICMP_EQ &&
671                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
672       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
673       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
674       // For those bits in RHS that are known, we can propagate them inverted
675       // to known bits in V shifted to the right by C.
676       KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
677       KnownOne  |= RHSKnownZero.lshr(C->getZExtValue());
678     // assume(v >> c = a)
679     } else if (match(Arg,
680                      m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
681                                                 m_AShr(m_V, m_ConstantInt(C))),
682                               m_Value(A))) &&
683                Pred == ICmpInst::ICMP_EQ &&
684                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
685       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
686       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
687       // For those bits in RHS that are known, we can propagate them to known
688       // bits in V shifted to the right by C.
689       KnownZero |= RHSKnownZero << C->getZExtValue();
690       KnownOne  |= RHSKnownOne  << C->getZExtValue();
691     // assume(~(v >> c) = a)
692     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
693                                              m_LShr(m_V, m_ConstantInt(C)),
694                                              m_AShr(m_V, m_ConstantInt(C)))),
695                                    m_Value(A))) &&
696                Pred == ICmpInst::ICMP_EQ &&
697                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
698       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
699       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
700       // For those bits in RHS that are known, we can propagate them inverted
701       // to known bits in V shifted to the right by C.
702       KnownZero |= RHSKnownOne  << C->getZExtValue();
703       KnownOne  |= RHSKnownZero << C->getZExtValue();
704     // assume(v >=_s c) where c is non-negative
705     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
706                Pred == ICmpInst::ICMP_SGE &&
707                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
708       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
709       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
710 
711       if (RHSKnownZero.isNegative()) {
712         // We know that the sign bit is zero.
713         KnownZero |= APInt::getSignBit(BitWidth);
714       }
715     // assume(v >_s c) where c is at least -1.
716     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
717                Pred == ICmpInst::ICMP_SGT &&
718                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
719       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
720       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
721 
722       if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
723         // We know that the sign bit is zero.
724         KnownZero |= APInt::getSignBit(BitWidth);
725       }
726     // assume(v <=_s c) where c is negative
727     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
728                Pred == ICmpInst::ICMP_SLE &&
729                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
730       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
731       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
732 
733       if (RHSKnownOne.isNegative()) {
734         // We know that the sign bit is one.
735         KnownOne |= APInt::getSignBit(BitWidth);
736       }
737     // assume(v <_s c) where c is non-positive
738     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
739                Pred == ICmpInst::ICMP_SLT &&
740                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
741       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
742       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
743 
744       if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
745         // We know that the sign bit is one.
746         KnownOne |= APInt::getSignBit(BitWidth);
747       }
748     // assume(v <=_u c)
749     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
750                Pred == ICmpInst::ICMP_ULE &&
751                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
752       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
753       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
754 
755       // Whatever high bits in c are zero are known to be zero.
756       KnownZero |=
757         APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
758     // assume(v <_u c)
759     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
760                Pred == ICmpInst::ICMP_ULT &&
761                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
762       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
763       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
764 
765       // Whatever high bits in c are zero are known to be zero (if c is a power
766       // of 2, then one more).
767       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
768         KnownZero |=
769           APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
770       else
771         KnownZero |=
772           APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
773     }
774   }
775 }
776 
777 // Compute known bits from a shift operator, including those with a
778 // non-constant shift amount. KnownZero and KnownOne are the outputs of this
779 // function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
780 // same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
781 // functors that, given the known-zero or known-one bits respectively, and a
782 // shift amount, compute the implied known-zero or known-one bits of the shift
783 // operator's result respectively for that shift amount. The results from calling
784 // KZF and KOF are conservatively combined for all permitted shift amounts.
785 template <typename KZFunctor, typename KOFunctor>
786 static void computeKnownBitsFromShiftOperator(const Operator *I,
787               APInt &KnownZero, APInt &KnownOne,
788               APInt &KnownZero2, APInt &KnownOne2,
789               unsigned Depth, const Query &Q, KZFunctor KZF, KOFunctor KOF) {
790   unsigned BitWidth = KnownZero.getBitWidth();
791 
792   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
793     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
794 
795     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
796     KnownZero = KZF(KnownZero, ShiftAmt);
797     KnownOne  = KOF(KnownOne, ShiftAmt);
798     return;
799   }
800 
801   computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
802 
803   // Note: We cannot use KnownZero.getLimitedValue() here, because if
804   // BitWidth > 64 and any upper bits are known, we'll end up returning the
805   // limit value (which implies all bits are known).
806   uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
807   uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();
808 
809   // It would be more-clearly correct to use the two temporaries for this
810   // calculation. Reusing the APInts here to prevent unnecessary allocations.
811   KnownZero.clearAllBits();
812   KnownOne.clearAllBits();
813 
814   // If we know the shifter operand is nonzero, we can sometimes infer more
815   // known bits. However this is expensive to compute, so be lazy about it and
816   // only compute it when absolutely necessary.
817   Optional<bool> ShifterOperandIsNonZero;
818 
819   // Early exit if we can't constrain any well-defined shift amount.
820   if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
821     ShifterOperandIsNonZero =
822         isKnownNonZero(I->getOperand(1), Depth + 1, Q);
823     if (!*ShifterOperandIsNonZero)
824       return;
825   }
826 
827   computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
828 
829   KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
830   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
831     // Combine the shifted known input bits only for those shift amounts
832     // compatible with its known constraints.
833     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
834       continue;
835     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
836       continue;
837     // If we know the shifter is nonzero, we may be able to infer more known
838     // bits. This check is sunk down as far as possible to avoid the expensive
839     // call to isKnownNonZero if the cheaper checks above fail.
840     if (ShiftAmt == 0) {
841       if (!ShifterOperandIsNonZero.hasValue())
842         ShifterOperandIsNonZero =
843             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
844       if (*ShifterOperandIsNonZero)
845         continue;
846     }
847 
848     KnownZero &= KZF(KnownZero2, ShiftAmt);
849     KnownOne  &= KOF(KnownOne2, ShiftAmt);
850   }
851 
852   // If there are no compatible shift amounts, then we've proven that the shift
853   // amount must be >= the BitWidth, and the result is undefined. We could
854   // return anything we'd like, but we need to make sure the sets of known bits
855   // stay disjoint (it should be better for some other code to actually
856   // propagate the undef than to pick a value here using known bits).
857   if ((KnownZero & KnownOne) != 0) {
858     KnownZero.clearAllBits();
859     KnownOne.clearAllBits();
860   }
861 }
862 
863 static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
864                                          APInt &KnownOne, unsigned Depth,
865                                          const Query &Q) {
866   unsigned BitWidth = KnownZero.getBitWidth();
867 
868   APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
869   switch (I->getOpcode()) {
870   default: break;
871   case Instruction::Load:
872     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
873       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
874     break;
875   case Instruction::And: {
876     // If either the LHS or the RHS are Zero, the result is zero.
877     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
878     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
879 
880     // Output known-1 bits are only known if set in both the LHS & RHS.
881     KnownOne &= KnownOne2;
882     // Output known-0 are known to be clear if zero in either the LHS | RHS.
883     KnownZero |= KnownZero2;
884 
885     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
886     // here we handle the more general case of adding any odd number by
887     // matching the form add(x, add(x, y)) where y is odd.
888     // TODO: This could be generalized to clearing any bit set in y where the
889     // following bit is known to be unset in y.
890     Value *Y = nullptr;
891     if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
892                                       m_Value(Y))) ||
893         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
894                                       m_Value(Y)))) {
895       APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
896       computeKnownBits(Y, KnownZero3, KnownOne3, Depth + 1, Q);
897       if (KnownOne3.countTrailingOnes() > 0)
898         KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
899     }
900     break;
901   }
902   case Instruction::Or: {
903     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
904     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
905 
906     // Output known-0 bits are only known if clear in both the LHS & RHS.
907     KnownZero &= KnownZero2;
908     // Output known-1 are known to be set if set in either the LHS | RHS.
909     KnownOne |= KnownOne2;
910     break;
911   }
912   case Instruction::Xor: {
913     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
914     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
915 
916     // Output known-0 bits are known if clear or set in both the LHS & RHS.
917     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
918     // Output known-1 are known to be set if set in only one of the LHS, RHS.
919     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
920     KnownZero = KnownZeroOut;
921     break;
922   }
923   case Instruction::Mul: {
924     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
925     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
926                         KnownOne, KnownZero2, KnownOne2, Depth, Q);
927     break;
928   }
929   case Instruction::UDiv: {
930     // For the purposes of computing leading zeros we can conservatively
931     // treat a udiv as a logical right shift by the power of 2 known to
932     // be less than the denominator.
933     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
934     unsigned LeadZ = KnownZero2.countLeadingOnes();
935 
936     KnownOne2.clearAllBits();
937     KnownZero2.clearAllBits();
938     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
939     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
940     if (RHSUnknownLeadingOnes != BitWidth)
941       LeadZ = std::min(BitWidth,
942                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
943 
944     KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
945     break;
946   }
947   case Instruction::Select: {
948     computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
949     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
950 
951     const Value *LHS;
952     const Value *RHS;
953     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
954     if (SelectPatternResult::isMinOrMax(SPF)) {
955       computeKnownBits(RHS, KnownZero, KnownOne, Depth + 1, Q);
956       computeKnownBits(LHS, KnownZero2, KnownOne2, Depth + 1, Q);
957     } else {
958       computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
959       computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
960     }
961 
962     unsigned MaxHighOnes = 0;
963     unsigned MaxHighZeros = 0;
964     if (SPF == SPF_SMAX) {
965       // If both sides are negative, the result is negative.
966       if (KnownOne[BitWidth - 1] && KnownOne2[BitWidth - 1])
967         // We can derive a lower bound on the result by taking the max of the
968         // leading one bits.
969         MaxHighOnes =
970             std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
971       // If either side is non-negative, the result is non-negative.
972       else if (KnownZero[BitWidth - 1] || KnownZero2[BitWidth - 1])
973         MaxHighZeros = 1;
974     } else if (SPF == SPF_SMIN) {
975       // If both sides are non-negative, the result is non-negative.
976       if (KnownZero[BitWidth - 1] && KnownZero2[BitWidth - 1])
977         // We can derive an upper bound on the result by taking the max of the
978         // leading zero bits.
979         MaxHighZeros = std::max(KnownZero.countLeadingOnes(),
980                                 KnownZero2.countLeadingOnes());
981       // If either side is negative, the result is negative.
982       else if (KnownOne[BitWidth - 1] || KnownOne2[BitWidth - 1])
983         MaxHighOnes = 1;
984     } else if (SPF == SPF_UMAX) {
985       // We can derive a lower bound on the result by taking the max of the
986       // leading one bits.
987       MaxHighOnes =
988           std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
989     } else if (SPF == SPF_UMIN) {
990       // We can derive an upper bound on the result by taking the max of the
991       // leading zero bits.
992       MaxHighZeros =
993           std::max(KnownZero.countLeadingOnes(), KnownZero2.countLeadingOnes());
994     }
995 
996     // Only known if known in both the LHS and RHS.
997     KnownOne &= KnownOne2;
998     KnownZero &= KnownZero2;
999     if (MaxHighOnes > 0)
1000       KnownOne |= APInt::getHighBitsSet(BitWidth, MaxHighOnes);
1001     if (MaxHighZeros > 0)
1002       KnownZero |= APInt::getHighBitsSet(BitWidth, MaxHighZeros);
1003     break;
1004   }
1005   case Instruction::FPTrunc:
1006   case Instruction::FPExt:
1007   case Instruction::FPToUI:
1008   case Instruction::FPToSI:
1009   case Instruction::SIToFP:
1010   case Instruction::UIToFP:
1011     break; // Can't work with floating point.
1012   case Instruction::PtrToInt:
1013   case Instruction::IntToPtr:
1014   case Instruction::AddrSpaceCast: // Pointers could be different sizes.
1015     // Fall through and handle them the same as zext/trunc.
1016     LLVM_FALLTHROUGH;
1017   case Instruction::ZExt:
1018   case Instruction::Trunc: {
1019     Type *SrcTy = I->getOperand(0)->getType();
1020 
1021     unsigned SrcBitWidth;
1022     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1023     // which fall through here.
1024     SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
1025 
1026     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1027     KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
1028     KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
1029     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1030     KnownZero = KnownZero.zextOrTrunc(BitWidth);
1031     KnownOne = KnownOne.zextOrTrunc(BitWidth);
1032     // Any top bits are known to be zero.
1033     if (BitWidth > SrcBitWidth)
1034       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1035     break;
1036   }
1037   case Instruction::BitCast: {
1038     Type *SrcTy = I->getOperand(0)->getType();
1039     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1040         // TODO: For now, not handling conversions like:
1041         // (bitcast i64 %x to <2 x i32>)
1042         !I->getType()->isVectorTy()) {
1043       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1044       break;
1045     }
1046     break;
1047   }
1048   case Instruction::SExt: {
1049     // Compute the bits in the result that are not present in the input.
1050     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1051 
1052     KnownZero = KnownZero.trunc(SrcBitWidth);
1053     KnownOne = KnownOne.trunc(SrcBitWidth);
1054     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1055     KnownZero = KnownZero.zext(BitWidth);
1056     KnownOne = KnownOne.zext(BitWidth);
1057 
1058     // If the sign bit of the input is known set or clear, then we know the
1059     // top bits of the result.
1060     if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
1061       KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1062     else if (KnownOne[SrcBitWidth-1])           // Input sign bit known set
1063       KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1064     break;
1065   }
1066   case Instruction::Shl: {
1067     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1068     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1069       return (KnownZero << ShiftAmt) |
1070              APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
1071     };
1072 
1073     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1074       return KnownOne << ShiftAmt;
1075     };
1076 
1077     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1078                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1079                                       KOF);
1080     break;
1081   }
1082   case Instruction::LShr: {
1083     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1084     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1085       return APIntOps::lshr(KnownZero, ShiftAmt) |
1086              // High bits known zero.
1087              APInt::getHighBitsSet(BitWidth, ShiftAmt);
1088     };
1089 
1090     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1091       return APIntOps::lshr(KnownOne, ShiftAmt);
1092     };
1093 
1094     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1095                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1096                                       KOF);
1097     break;
1098   }
1099   case Instruction::AShr: {
1100     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1101     auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
1102       return APIntOps::ashr(KnownZero, ShiftAmt);
1103     };
1104 
1105     auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
1106       return APIntOps::ashr(KnownOne, ShiftAmt);
1107     };
1108 
1109     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1110                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1111                                       KOF);
1112     break;
1113   }
1114   case Instruction::Sub: {
1115     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1116     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1117                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1118                            Q);
1119     break;
1120   }
1121   case Instruction::Add: {
1122     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1123     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1124                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1125                            Q);
1126     break;
1127   }
1128   case Instruction::SRem:
1129     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1130       APInt RA = Rem->getValue().abs();
1131       if (RA.isPowerOf2()) {
1132         APInt LowBits = RA - 1;
1133         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
1134                          Q);
1135 
1136         // The low bits of the first operand are unchanged by the srem.
1137         KnownZero = KnownZero2 & LowBits;
1138         KnownOne = KnownOne2 & LowBits;
1139 
1140         // If the first operand is non-negative or has all low bits zero, then
1141         // the upper bits are all zero.
1142         if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
1143           KnownZero |= ~LowBits;
1144 
1145         // If the first operand is negative and not all low bits are zero, then
1146         // the upper bits are all one.
1147         if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
1148           KnownOne |= ~LowBits;
1149 
1150         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1151       }
1152     }
1153 
1154     // The sign bit is the LHS's sign bit, except when the result of the
1155     // remainder is zero.
1156     if (KnownZero.isNonNegative()) {
1157       APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
1158       computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
1159                        Q);
1160       // If it's known zero, our sign bit is also zero.
1161       if (LHSKnownZero.isNegative())
1162         KnownZero.setBit(BitWidth - 1);
1163     }
1164 
1165     break;
1166   case Instruction::URem: {
1167     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1168       const APInt &RA = Rem->getValue();
1169       if (RA.isPowerOf2()) {
1170         APInt LowBits = (RA - 1);
1171         computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1172         KnownZero |= ~LowBits;
1173         KnownOne &= LowBits;
1174         break;
1175       }
1176     }
1177 
1178     // Since the result is less than or equal to either operand, any leading
1179     // zero bits in either operand must also exist in the result.
1180     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1181     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
1182 
1183     unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
1184                                 KnownZero2.countLeadingOnes());
1185     KnownOne.clearAllBits();
1186     KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
1187     break;
1188   }
1189 
1190   case Instruction::Alloca: {
1191     const AllocaInst *AI = cast<AllocaInst>(I);
1192     unsigned Align = AI->getAlignment();
1193     if (Align == 0)
1194       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1195 
1196     if (Align > 0)
1197       KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1198     break;
1199   }
1200   case Instruction::GetElementPtr: {
1201     // Analyze all of the subscripts of this getelementptr instruction
1202     // to determine if we can prove known low zero bits.
1203     APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
1204     computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
1205                      Q);
1206     unsigned TrailZ = LocalKnownZero.countTrailingOnes();
1207 
1208     gep_type_iterator GTI = gep_type_begin(I);
1209     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1210       Value *Index = I->getOperand(i);
1211       if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1212         // Handle struct member offset arithmetic.
1213 
1214         // Handle case when index is vector zeroinitializer
1215         Constant *CIndex = cast<Constant>(Index);
1216         if (CIndex->isZeroValue())
1217           continue;
1218 
1219         if (CIndex->getType()->isVectorTy())
1220           Index = CIndex->getSplatValue();
1221 
1222         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1223         const StructLayout *SL = Q.DL.getStructLayout(STy);
1224         uint64_t Offset = SL->getElementOffset(Idx);
1225         TrailZ = std::min<unsigned>(TrailZ,
1226                                     countTrailingZeros(Offset));
1227       } else {
1228         // Handle array index arithmetic.
1229         Type *IndexedTy = GTI.getIndexedType();
1230         if (!IndexedTy->isSized()) {
1231           TrailZ = 0;
1232           break;
1233         }
1234         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1235         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1236         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
1237         computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
1238         TrailZ = std::min(TrailZ,
1239                           unsigned(countTrailingZeros(TypeSize) +
1240                                    LocalKnownZero.countTrailingOnes()));
1241       }
1242     }
1243 
1244     KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
1245     break;
1246   }
1247   case Instruction::PHI: {
1248     const PHINode *P = cast<PHINode>(I);
1249     // Handle the case of a simple two-predecessor recurrence PHI.
1250     // There's a lot more that could theoretically be done here, but
1251     // this is sufficient to catch some interesting cases.
1252     if (P->getNumIncomingValues() == 2) {
1253       for (unsigned i = 0; i != 2; ++i) {
1254         Value *L = P->getIncomingValue(i);
1255         Value *R = P->getIncomingValue(!i);
1256         Operator *LU = dyn_cast<Operator>(L);
1257         if (!LU)
1258           continue;
1259         unsigned Opcode = LU->getOpcode();
1260         // Check for operations that have the property that if
1261         // both their operands have low zero bits, the result
1262         // will have low zero bits. Also check for operations
1263         // that are known to produce non-negative or negative
1264         // recurrence values.
1265         if (Opcode == Instruction::Add ||
1266             Opcode == Instruction::Sub ||
1267             Opcode == Instruction::And ||
1268             Opcode == Instruction::Or ||
1269             Opcode == Instruction::Mul) {
1270           Value *LL = LU->getOperand(0);
1271           Value *LR = LU->getOperand(1);
1272           // Find a recurrence.
1273           if (LL == I)
1274             L = LR;
1275           else if (LR == I)
1276             L = LL;
1277           else
1278             break;
1279           // Ok, we have a PHI of the form L op= R. Check for low
1280           // zero bits.
1281           computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);
1282 
1283           // We need to take the minimum number of known bits
1284           APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
1285           computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);
1286 
1287           KnownZero = APInt::getLowBitsSet(BitWidth,
1288                                            std::min(KnownZero2.countTrailingOnes(),
1289                                                     KnownZero3.countTrailingOnes()));
1290 
1291           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1292           if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
1293             // If initial value of recurrence is nonnegative, and we are adding
1294             // a nonnegative number with nsw, the result can only be nonnegative
1295             // or poison value regardless of the number of times we execute the
1296             // add in phi recurrence. If initial value is negative and we are
1297             // adding a negative number with nsw, the result can only be
1298             // negative or poison value. Similar arguments apply to sub and mul.
1299             //
1300             // (add non-negative, non-negative) --> non-negative
1301             // (add negative, negative) --> negative
1302             if (Opcode == Instruction::Add) {
1303               if (KnownZero2.isNegative() && KnownZero3.isNegative())
1304                 KnownZero.setBit(BitWidth - 1);
1305               else if (KnownOne2.isNegative() && KnownOne3.isNegative())
1306                 KnownOne.setBit(BitWidth - 1);
1307             }
1308 
1309             // (sub nsw non-negative, negative) --> non-negative
1310             // (sub nsw negative, non-negative) --> negative
1311             else if (Opcode == Instruction::Sub && LL == I) {
1312               if (KnownZero2.isNegative() && KnownOne3.isNegative())
1313                 KnownZero.setBit(BitWidth - 1);
1314               else if (KnownOne2.isNegative() && KnownZero3.isNegative())
1315                 KnownOne.setBit(BitWidth - 1);
1316             }
1317 
1318             // (mul nsw non-negative, non-negative) --> non-negative
1319             else if (Opcode == Instruction::Mul && KnownZero2.isNegative() &&
1320                      KnownZero3.isNegative())
1321               KnownZero.setBit(BitWidth - 1);
1322           }
1323 
1324           break;
1325         }
1326       }
1327     }
1328 
1329     // Unreachable blocks may have zero-operand PHI nodes.
1330     if (P->getNumIncomingValues() == 0)
1331       break;
1332 
1333     // Otherwise take the unions of the known bit sets of the operands,
1334     // taking conservative care to avoid excessive recursion.
1335     if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
1336       // Skip if every incoming value references to ourself.
1337       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1338         break;
1339 
1340       KnownZero = APInt::getAllOnesValue(BitWidth);
1341       KnownOne = APInt::getAllOnesValue(BitWidth);
1342       for (Value *IncValue : P->incoming_values()) {
1343         // Skip direct self references.
1344         if (IncValue == P) continue;
1345 
1346         KnownZero2 = APInt(BitWidth, 0);
1347         KnownOne2 = APInt(BitWidth, 0);
1348         // Recurse, but cap the recursion to one level, because we don't
1349         // want to waste time spinning around in loops.
1350         computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
1351         KnownZero &= KnownZero2;
1352         KnownOne &= KnownOne2;
1353         // If all bits have been ruled out, there's no need to check
1354         // more operands.
1355         if (!KnownZero && !KnownOne)
1356           break;
1357       }
1358     }
1359     break;
1360   }
1361   case Instruction::Call:
1362   case Instruction::Invoke:
1363     // If range metadata is attached to this call, set known bits from that,
1364     // and then intersect with known bits based on other properties of the
1365     // function.
1366     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1367       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
1368     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1369       computeKnownBits(RV, KnownZero2, KnownOne2, Depth + 1, Q);
1370       KnownZero |= KnownZero2;
1371       KnownOne |= KnownOne2;
1372     }
1373     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1374       switch (II->getIntrinsicID()) {
1375       default: break;
1376       case Intrinsic::bswap:
1377         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1378         KnownZero |= KnownZero2.byteSwap();
1379         KnownOne |= KnownOne2.byteSwap();
1380         break;
1381       case Intrinsic::ctlz:
1382       case Intrinsic::cttz: {
1383         unsigned LowBits = Log2_32(BitWidth)+1;
1384         // If this call is undefined for 0, the result will be less than 2^n.
1385         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1386           LowBits -= 1;
1387         KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1388         break;
1389       }
1390       case Intrinsic::ctpop: {
1391         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1392         // We can bound the space the count needs.  Also, bits known to be zero
1393         // can't contribute to the population.
1394         unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
1395         unsigned LeadingZeros =
1396           APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
1397         assert(LeadingZeros <= BitWidth);
1398         KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
1399         KnownOne &= ~KnownZero;
1400         // TODO: we could bound KnownOne using the lower bound on the number
1401         // of bits which might be set provided by popcnt KnownOne2.
1402         break;
1403       }
1404       case Intrinsic::x86_sse42_crc32_64_64:
1405         KnownZero |= APInt::getHighBitsSet(64, 32);
1406         break;
1407       }
1408     }
1409     break;
1410   case Instruction::ExtractValue:
1411     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1412       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1413       if (EVI->getNumIndices() != 1) break;
1414       if (EVI->getIndices()[0] == 0) {
1415         switch (II->getIntrinsicID()) {
1416         default: break;
1417         case Intrinsic::uadd_with_overflow:
1418         case Intrinsic::sadd_with_overflow:
1419           computeKnownBitsAddSub(true, II->getArgOperand(0),
1420                                  II->getArgOperand(1), false, KnownZero,
1421                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1422           break;
1423         case Intrinsic::usub_with_overflow:
1424         case Intrinsic::ssub_with_overflow:
1425           computeKnownBitsAddSub(false, II->getArgOperand(0),
1426                                  II->getArgOperand(1), false, KnownZero,
1427                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1428           break;
1429         case Intrinsic::umul_with_overflow:
1430         case Intrinsic::smul_with_overflow:
1431           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1432                               KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1433                               Q);
1434           break;
1435         }
1436       }
1437     }
1438   }
1439 }
1440 
1441 /// Determine which bits of V are known to be either zero or one and return
1442 /// them in the KnownZero/KnownOne bit sets.
1443 ///
1444 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1445 /// we cannot optimize based on the assumption that it is zero without changing
1446 /// it to be an explicit zero.  If we don't change it to zero, other code could
1447 /// optimized based on the contradictory assumption that it is non-zero.
1448 /// Because instcombine aggressively folds operations with undef args anyway,
1449 /// this won't lose us code quality.
1450 ///
1451 /// This function is defined on values with integer type, values with pointer
1452 /// type, and vectors of integers.  In the case
1453 /// where V is a vector, known zero, and known one values are the
1454 /// same width as the vector element, and the bit is set only if it is true
1455 /// for all of the elements in the vector.
1456 void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
1457                       unsigned Depth, const Query &Q) {
1458   assert(V && "No Value?");
1459   assert(Depth <= MaxDepth && "Limit Search Depth");
1460   unsigned BitWidth = KnownZero.getBitWidth();
1461 
1462   assert((V->getType()->isIntOrIntVectorTy() ||
1463           V->getType()->getScalarType()->isPointerTy()) &&
1464          "Not integer or pointer type!");
1465   assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1466          (!V->getType()->isIntOrIntVectorTy() ||
1467           V->getType()->getScalarSizeInBits() == BitWidth) &&
1468          KnownZero.getBitWidth() == BitWidth &&
1469          KnownOne.getBitWidth() == BitWidth &&
1470          "V, KnownOne and KnownZero should have same BitWidth");
1471 
1472   if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1473     // We know all of the bits for a constant!
1474     KnownOne = CI->getValue();
1475     KnownZero = ~KnownOne;
1476     return;
1477   }
1478   // Null and aggregate-zero are all-zeros.
1479   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1480     KnownOne.clearAllBits();
1481     KnownZero = APInt::getAllOnesValue(BitWidth);
1482     return;
1483   }
1484   // Handle a constant vector by taking the intersection of the known bits of
1485   // each element.
1486   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1487     // We know that CDS must be a vector of integers. Take the intersection of
1488     // each element.
1489     KnownZero.setAllBits(); KnownOne.setAllBits();
1490     APInt Elt(KnownZero.getBitWidth(), 0);
1491     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1492       Elt = CDS->getElementAsInteger(i);
1493       KnownZero &= ~Elt;
1494       KnownOne &= Elt;
1495     }
1496     return;
1497   }
1498 
1499   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1500     // We know that CV must be a vector of integers. Take the intersection of
1501     // each element.
1502     KnownZero.setAllBits(); KnownOne.setAllBits();
1503     APInt Elt(KnownZero.getBitWidth(), 0);
1504     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1505       Constant *Element = CV->getAggregateElement(i);
1506       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1507       if (!ElementCI) {
1508         KnownZero.clearAllBits();
1509         KnownOne.clearAllBits();
1510         return;
1511       }
1512       Elt = ElementCI->getValue();
1513       KnownZero &= ~Elt;
1514       KnownOne &= Elt;
1515     }
1516     return;
1517   }
1518 
1519   // Start out not knowing anything.
1520   KnownZero.clearAllBits(); KnownOne.clearAllBits();
1521 
1522   // Limit search depth.
1523   // All recursive calls that increase depth must come after this.
1524   if (Depth == MaxDepth)
1525     return;
1526 
1527   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1528   // the bits of its aliasee.
1529   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1530     if (!GA->isInterposable())
1531       computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
1532     return;
1533   }
1534 
1535   if (const Operator *I = dyn_cast<Operator>(V))
1536     computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
1537 
1538   // Aligned pointers have trailing zeros - refine KnownZero set
1539   if (V->getType()->isPointerTy()) {
1540     unsigned Align = V->getPointerAlignment(Q.DL);
1541     if (Align)
1542       KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1543   }
1544 
1545   // computeKnownBitsFromAssume strictly refines KnownZero and
1546   // KnownOne. Therefore, we run them after computeKnownBitsFromOperator.
1547 
1548   // Check whether a nearby assume intrinsic can determine some known bits.
1549   computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);
1550 
1551   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1552 }
1553 
1554 /// Determine whether the sign bit is known to be zero or one.
1555 /// Convenience wrapper around computeKnownBits.
1556 void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
1557                     unsigned Depth, const Query &Q) {
1558   unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
1559   if (!BitWidth) {
1560     KnownZero = false;
1561     KnownOne = false;
1562     return;
1563   }
1564   APInt ZeroBits(BitWidth, 0);
1565   APInt OneBits(BitWidth, 0);
1566   computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
1567   KnownOne = OneBits[BitWidth - 1];
1568   KnownZero = ZeroBits[BitWidth - 1];
1569 }
1570 
1571 /// Return true if the given value is known to have exactly one
1572 /// bit set when defined. For vectors return true if every element is known to
1573 /// be a power of two when defined. Supports values with integer or pointer
1574 /// types and vectors of integers.
1575 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1576                             const Query &Q) {
1577   if (const Constant *C = dyn_cast<Constant>(V)) {
1578     if (C->isNullValue())
1579       return OrZero;
1580 
1581     const APInt *ConstIntOrConstSplatInt;
1582     if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1583       return ConstIntOrConstSplatInt->isPowerOf2();
1584   }
1585 
1586   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1587   // it is shifted off the end then the result is undefined.
1588   if (match(V, m_Shl(m_One(), m_Value())))
1589     return true;
1590 
1591   // (signbit) >>l X is clearly a power of two if the one is not shifted off the
1592   // bottom.  If it is shifted off the bottom then the result is undefined.
1593   if (match(V, m_LShr(m_SignBit(), m_Value())))
1594     return true;
1595 
1596   // The remaining tests are all recursive, so bail out if we hit the limit.
1597   if (Depth++ == MaxDepth)
1598     return false;
1599 
1600   Value *X = nullptr, *Y = nullptr;
1601   // A shift left or a logical shift right of a power of two is a power of two
1602   // or zero.
1603   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1604                  match(V, m_LShr(m_Value(X), m_Value()))))
1605     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1606 
1607   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1608     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1609 
1610   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1611     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1612            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1613 
1614   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1615     // A power of two and'd with anything is a power of two or zero.
1616     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1617         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1618       return true;
1619     // X & (-X) is always a power of two or zero.
1620     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1621       return true;
1622     return false;
1623   }
1624 
1625   // Adding a power-of-two or zero to the same power-of-two or zero yields
1626   // either the original power-of-two, a larger power-of-two or zero.
1627   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1628     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1629     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1630       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1631           match(X, m_And(m_Value(), m_Specific(Y))))
1632         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1633           return true;
1634       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1635           match(Y, m_And(m_Value(), m_Specific(X))))
1636         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1637           return true;
1638 
1639       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1640       APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1641       computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);
1642 
1643       APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1644       computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
1645       // If i8 V is a power of two or zero:
1646       //  ZeroBits: 1 1 1 0 1 1 1 1
1647       // ~ZeroBits: 0 0 0 1 0 0 0 0
1648       if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1649         // If OrZero isn't set, we cannot give back a zero result.
1650         // Make sure either the LHS or RHS has a bit set.
1651         if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1652           return true;
1653     }
1654   }
1655 
1656   // An exact divide or right shift can only shift off zero bits, so the result
1657   // is a power of two only if the first operand is a power of two and not
1658   // copying a sign bit (sdiv int_min, 2).
1659   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1660       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1661     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1662                                   Depth, Q);
1663   }
1664 
1665   return false;
1666 }
1667 
1668 /// \brief Test whether a GEP's result is known to be non-null.
1669 ///
1670 /// Uses properties inherent in a GEP to try to determine whether it is known
1671 /// to be non-null.
1672 ///
1673 /// Currently this routine does not support vector GEPs.
1674 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1675                               const Query &Q) {
1676   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1677     return false;
1678 
1679   // FIXME: Support vector-GEPs.
1680   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1681 
1682   // If the base pointer is non-null, we cannot walk to a null address with an
1683   // inbounds GEP in address space zero.
1684   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1685     return true;
1686 
1687   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1688   // If so, then the GEP cannot produce a null pointer, as doing so would
1689   // inherently violate the inbounds contract within address space zero.
1690   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1691        GTI != GTE; ++GTI) {
1692     // Struct types are easy -- they must always be indexed by a constant.
1693     if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1694       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1695       unsigned ElementIdx = OpC->getZExtValue();
1696       const StructLayout *SL = Q.DL.getStructLayout(STy);
1697       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1698       if (ElementOffset > 0)
1699         return true;
1700       continue;
1701     }
1702 
1703     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1704     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1705       continue;
1706 
1707     // Fast path the constant operand case both for efficiency and so we don't
1708     // increment Depth when just zipping down an all-constant GEP.
1709     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1710       if (!OpC->isZero())
1711         return true;
1712       continue;
1713     }
1714 
1715     // We post-increment Depth here because while isKnownNonZero increments it
1716     // as well, when we pop back up that increment won't persist. We don't want
1717     // to recurse 10k times just because we have 10k GEP operands. We don't
1718     // bail completely out because we want to handle constant GEPs regardless
1719     // of depth.
1720     if (Depth++ >= MaxDepth)
1721       continue;
1722 
1723     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1724       return true;
1725   }
1726 
1727   return false;
1728 }
1729 
1730 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1731 /// ensure that the value it's attached to is never Value?  'RangeType' is
1732 /// is the type of the value described by the range.
1733 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1734   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1735   assert(NumRanges >= 1);
1736   for (unsigned i = 0; i < NumRanges; ++i) {
1737     ConstantInt *Lower =
1738         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1739     ConstantInt *Upper =
1740         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1741     ConstantRange Range(Lower->getValue(), Upper->getValue());
1742     if (Range.contains(Value))
1743       return false;
1744   }
1745   return true;
1746 }
1747 
1748 /// Return true if the given value is known to be non-zero when defined.
1749 /// For vectors return true if every element is known to be non-zero when
1750 /// defined. Supports values with integer or pointer type and vectors of
1751 /// integers.
1752 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1753   if (auto *C = dyn_cast<Constant>(V)) {
1754     if (C->isNullValue())
1755       return false;
1756     if (isa<ConstantInt>(C))
1757       // Must be non-zero due to null test above.
1758       return true;
1759 
1760     // For constant vectors, check that all elements are undefined or known
1761     // non-zero to determine that the whole vector is known non-zero.
1762     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1763       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1764         Constant *Elt = C->getAggregateElement(i);
1765         if (!Elt || Elt->isNullValue())
1766           return false;
1767         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1768           return false;
1769       }
1770       return true;
1771     }
1772 
1773     return false;
1774   }
1775 
1776   if (auto *I = dyn_cast<Instruction>(V)) {
1777     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1778       // If the possible ranges don't contain zero, then the value is
1779       // definitely non-zero.
1780       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1781         const APInt ZeroValue(Ty->getBitWidth(), 0);
1782         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1783           return true;
1784       }
1785     }
1786   }
1787 
1788   // The remaining tests are all recursive, so bail out if we hit the limit.
1789   if (Depth++ >= MaxDepth)
1790     return false;
1791 
1792   // Check for pointer simplifications.
1793   if (V->getType()->isPointerTy()) {
1794     if (isKnownNonNull(V))
1795       return true;
1796     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1797       if (isGEPKnownNonNull(GEP, Depth, Q))
1798         return true;
1799   }
1800 
1801   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1802 
1803   // X | Y != 0 if X != 0 or Y != 0.
1804   Value *X = nullptr, *Y = nullptr;
1805   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1806     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1807 
1808   // ext X != 0 if X != 0.
1809   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1810     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1811 
1812   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1813   // if the lowest bit is shifted off the end.
1814   if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1815     // shl nuw can't remove any non-zero bits.
1816     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1817     if (BO->hasNoUnsignedWrap())
1818       return isKnownNonZero(X, Depth, Q);
1819 
1820     APInt KnownZero(BitWidth, 0);
1821     APInt KnownOne(BitWidth, 0);
1822     computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1823     if (KnownOne[0])
1824       return true;
1825   }
1826   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1827   // defined if the sign bit is shifted off the end.
1828   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1829     // shr exact can only shift out zero bits.
1830     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1831     if (BO->isExact())
1832       return isKnownNonZero(X, Depth, Q);
1833 
1834     bool XKnownNonNegative, XKnownNegative;
1835     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1836     if (XKnownNegative)
1837       return true;
1838 
1839     // If the shifter operand is a constant, and all of the bits shifted
1840     // out are known to be zero, and X is known non-zero then at least one
1841     // non-zero bit must remain.
1842     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1843       APInt KnownZero(BitWidth, 0);
1844       APInt KnownOne(BitWidth, 0);
1845       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1846 
1847       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1848       // Is there a known one in the portion not shifted out?
1849       if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1850         return true;
1851       // Are all the bits to be shifted out known zero?
1852       if (KnownZero.countTrailingOnes() >= ShiftVal)
1853         return isKnownNonZero(X, Depth, Q);
1854     }
1855   }
1856   // div exact can only produce a zero if the dividend is zero.
1857   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1858     return isKnownNonZero(X, Depth, Q);
1859   }
1860   // X + Y.
1861   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1862     bool XKnownNonNegative, XKnownNegative;
1863     bool YKnownNonNegative, YKnownNegative;
1864     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1865     ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);
1866 
1867     // If X and Y are both non-negative (as signed values) then their sum is not
1868     // zero unless both X and Y are zero.
1869     if (XKnownNonNegative && YKnownNonNegative)
1870       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1871         return true;
1872 
1873     // If X and Y are both negative (as signed values) then their sum is not
1874     // zero unless both X and Y equal INT_MIN.
1875     if (BitWidth && XKnownNegative && YKnownNegative) {
1876       APInt KnownZero(BitWidth, 0);
1877       APInt KnownOne(BitWidth, 0);
1878       APInt Mask = APInt::getSignedMaxValue(BitWidth);
1879       // The sign bit of X is set.  If some other bit is set then X is not equal
1880       // to INT_MIN.
1881       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1882       if ((KnownOne & Mask) != 0)
1883         return true;
1884       // The sign bit of Y is set.  If some other bit is set then Y is not equal
1885       // to INT_MIN.
1886       computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
1887       if ((KnownOne & Mask) != 0)
1888         return true;
1889     }
1890 
1891     // The sum of a non-negative number and a power of two is not zero.
1892     if (XKnownNonNegative &&
1893         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1894       return true;
1895     if (YKnownNonNegative &&
1896         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1897       return true;
1898   }
1899   // X * Y.
1900   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1901     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1902     // If X and Y are non-zero then so is X * Y as long as the multiplication
1903     // does not overflow.
1904     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1905         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1906       return true;
1907   }
1908   // (C ? X : Y) != 0 if X != 0 and Y != 0.
1909   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
1910     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1911         isKnownNonZero(SI->getFalseValue(), Depth, Q))
1912       return true;
1913   }
1914   // PHI
1915   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
1916     // Try and detect a recurrence that monotonically increases from a
1917     // starting value, as these are common as induction variables.
1918     if (PN->getNumIncomingValues() == 2) {
1919       Value *Start = PN->getIncomingValue(0);
1920       Value *Induction = PN->getIncomingValue(1);
1921       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1922         std::swap(Start, Induction);
1923       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1924         if (!C->isZero() && !C->isNegative()) {
1925           ConstantInt *X;
1926           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1927                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1928               !X->isNegative())
1929             return true;
1930         }
1931       }
1932     }
1933     // Check if all incoming values are non-zero constant.
1934     bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1935       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1936     });
1937     if (AllNonZeroConstants)
1938       return true;
1939   }
1940 
1941   if (!BitWidth) return false;
1942   APInt KnownZero(BitWidth, 0);
1943   APInt KnownOne(BitWidth, 0);
1944   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
1945   return KnownOne != 0;
1946 }
1947 
1948 /// Return true if V2 == V1 + X, where X is known non-zero.
1949 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
1950   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
1951   if (!BO || BO->getOpcode() != Instruction::Add)
1952     return false;
1953   Value *Op = nullptr;
1954   if (V2 == BO->getOperand(0))
1955     Op = BO->getOperand(1);
1956   else if (V2 == BO->getOperand(1))
1957     Op = BO->getOperand(0);
1958   else
1959     return false;
1960   return isKnownNonZero(Op, 0, Q);
1961 }
1962 
1963 /// Return true if it is known that V1 != V2.
1964 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
1965   if (V1->getType()->isVectorTy() || V1 == V2)
1966     return false;
1967   if (V1->getType() != V2->getType())
1968     // We can't look through casts yet.
1969     return false;
1970   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
1971     return true;
1972 
1973   if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
1974     // Are any known bits in V1 contradictory to known bits in V2? If V1
1975     // has a known zero where V2 has a known one, they must not be equal.
1976     auto BitWidth = Ty->getBitWidth();
1977     APInt KnownZero1(BitWidth, 0);
1978     APInt KnownOne1(BitWidth, 0);
1979     computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
1980     APInt KnownZero2(BitWidth, 0);
1981     APInt KnownOne2(BitWidth, 0);
1982     computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);
1983 
1984     auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
1985     if (OppositeBits.getBoolValue())
1986       return true;
1987   }
1988   return false;
1989 }
1990 
1991 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
1992 /// simplify operations downstream. Mask is known to be zero for bits that V
1993 /// cannot have.
1994 ///
1995 /// This function is defined on values with integer type, values with pointer
1996 /// type, and vectors of integers.  In the case
1997 /// where V is a vector, the mask, known zero, and known one values are the
1998 /// same width as the vector element, and the bit is set only if it is true
1999 /// for all of the elements in the vector.
2000 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2001                        const Query &Q) {
2002   APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
2003   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2004   return (KnownZero & Mask) == Mask;
2005 }
2006 
2007 /// For vector constants, loop over the elements and find the constant with the
2008 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2009 /// or if any element was not analyzed; otherwise, return the count for the
2010 /// element with the minimum number of sign bits.
2011 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2012                                                  unsigned TyBits) {
2013   const auto *CV = dyn_cast<Constant>(V);
2014   if (!CV || !CV->getType()->isVectorTy())
2015     return 0;
2016 
2017   unsigned MinSignBits = TyBits;
2018   unsigned NumElts = CV->getType()->getVectorNumElements();
2019   for (unsigned i = 0; i != NumElts; ++i) {
2020     // If we find a non-ConstantInt, bail out.
2021     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2022     if (!Elt)
2023       return 0;
2024 
2025     // If the sign bit is 1, flip the bits, so we always count leading zeros.
2026     APInt EltVal = Elt->getValue();
2027     if (EltVal.isNegative())
2028       EltVal = ~EltVal;
2029     MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
2030   }
2031 
2032   return MinSignBits;
2033 }
2034 
2035 /// Return the number of times the sign bit of the register is replicated into
2036 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2037 /// (itself), but other cases can give us information. For example, immediately
2038 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2039 /// other, so we return 3. For vectors, return the number of sign bits for the
2040 /// vector element with the mininum number of known sign bits.
2041 unsigned ComputeNumSignBits(const Value *V, unsigned Depth, const Query &Q) {
2042   unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2043   unsigned Tmp, Tmp2;
2044   unsigned FirstAnswer = 1;
2045 
2046   // Note that ConstantInt is handled by the general computeKnownBits case
2047   // below.
2048 
2049   if (Depth == 6)
2050     return 1;  // Limit search depth.
2051 
2052   const Operator *U = dyn_cast<Operator>(V);
2053   switch (Operator::getOpcode(V)) {
2054   default: break;
2055   case Instruction::SExt:
2056     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2057     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2058 
2059   case Instruction::SDiv: {
2060     const APInt *Denominator;
2061     // sdiv X, C -> adds log(C) sign bits.
2062     if (match(U->getOperand(1), m_APInt(Denominator))) {
2063 
2064       // Ignore non-positive denominator.
2065       if (!Denominator->isStrictlyPositive())
2066         break;
2067 
2068       // Calculate the incoming numerator bits.
2069       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2070 
2071       // Add floor(log(C)) bits to the numerator bits.
2072       return std::min(TyBits, NumBits + Denominator->logBase2());
2073     }
2074     break;
2075   }
2076 
2077   case Instruction::SRem: {
2078     const APInt *Denominator;
2079     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2080     // positive constant.  This let us put a lower bound on the number of sign
2081     // bits.
2082     if (match(U->getOperand(1), m_APInt(Denominator))) {
2083 
2084       // Ignore non-positive denominator.
2085       if (!Denominator->isStrictlyPositive())
2086         break;
2087 
2088       // Calculate the incoming numerator bits. SRem by a positive constant
2089       // can't lower the number of sign bits.
2090       unsigned NumrBits =
2091           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2092 
2093       // Calculate the leading sign bit constraints by examining the
2094       // denominator.  Given that the denominator is positive, there are two
2095       // cases:
2096       //
2097       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2098       //     (1 << ceilLogBase2(C)).
2099       //
2100       //  2. the numerator is negative.  Then the result range is (-C,0] and
2101       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2102       //
2103       // Thus a lower bound on the number of sign bits is `TyBits -
2104       // ceilLogBase2(C)`.
2105 
2106       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2107       return std::max(NumrBits, ResBits);
2108     }
2109     break;
2110   }
2111 
2112   case Instruction::AShr: {
2113     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2114     // ashr X, C   -> adds C sign bits.  Vectors too.
2115     const APInt *ShAmt;
2116     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2117       Tmp += ShAmt->getZExtValue();
2118       if (Tmp > TyBits) Tmp = TyBits;
2119     }
2120     return Tmp;
2121   }
2122   case Instruction::Shl: {
2123     const APInt *ShAmt;
2124     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2125       // shl destroys sign bits.
2126       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2127       Tmp2 = ShAmt->getZExtValue();
2128       if (Tmp2 >= TyBits ||      // Bad shift.
2129           Tmp2 >= Tmp) break;    // Shifted all sign bits out.
2130       return Tmp - Tmp2;
2131     }
2132     break;
2133   }
2134   case Instruction::And:
2135   case Instruction::Or:
2136   case Instruction::Xor:    // NOT is handled here.
2137     // Logical binary ops preserve the number of sign bits at the worst.
2138     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2139     if (Tmp != 1) {
2140       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2141       FirstAnswer = std::min(Tmp, Tmp2);
2142       // We computed what we know about the sign bits as our first
2143       // answer. Now proceed to the generic code that uses
2144       // computeKnownBits, and pick whichever answer is better.
2145     }
2146     break;
2147 
2148   case Instruction::Select:
2149     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2150     if (Tmp == 1) return 1;  // Early out.
2151     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2152     return std::min(Tmp, Tmp2);
2153 
2154   case Instruction::Add:
2155     // Add can have at most one carry bit.  Thus we know that the output
2156     // is, at worst, one more bit than the inputs.
2157     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2158     if (Tmp == 1) return 1;  // Early out.
2159 
2160     // Special case decrementing a value (ADD X, -1):
2161     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2162       if (CRHS->isAllOnesValue()) {
2163         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2164         computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
2165 
2166         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2167         // sign bits set.
2168         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2169           return TyBits;
2170 
2171         // If we are subtracting one from a positive number, there is no carry
2172         // out of the result.
2173         if (KnownZero.isNegative())
2174           return Tmp;
2175       }
2176 
2177     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2178     if (Tmp2 == 1) return 1;
2179     return std::min(Tmp, Tmp2)-1;
2180 
2181   case Instruction::Sub:
2182     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2183     if (Tmp2 == 1) return 1;
2184 
2185     // Handle NEG.
2186     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2187       if (CLHS->isNullValue()) {
2188         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2189         computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
2190         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2191         // sign bits set.
2192         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2193           return TyBits;
2194 
2195         // If the input is known to be positive (the sign bit is known clear),
2196         // the output of the NEG has the same number of sign bits as the input.
2197         if (KnownZero.isNegative())
2198           return Tmp2;
2199 
2200         // Otherwise, we treat this like a SUB.
2201       }
2202 
2203     // Sub can have at most one carry bit.  Thus we know that the output
2204     // is, at worst, one more bit than the inputs.
2205     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2206     if (Tmp == 1) return 1;  // Early out.
2207     return std::min(Tmp, Tmp2)-1;
2208 
2209   case Instruction::PHI: {
2210     const PHINode *PN = cast<PHINode>(U);
2211     unsigned NumIncomingValues = PN->getNumIncomingValues();
2212     // Don't analyze large in-degree PHIs.
2213     if (NumIncomingValues > 4) break;
2214     // Unreachable blocks may have zero-operand PHI nodes.
2215     if (NumIncomingValues == 0) break;
2216 
2217     // Take the minimum of all incoming values.  This can't infinitely loop
2218     // because of our depth threshold.
2219     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2220     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2221       if (Tmp == 1) return Tmp;
2222       Tmp = std::min(
2223           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2224     }
2225     return Tmp;
2226   }
2227 
2228   case Instruction::Trunc:
2229     // FIXME: it's tricky to do anything useful for this, but it is an important
2230     // case for targets like X86.
2231     break;
2232   }
2233 
2234   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2235   // use this information.
2236 
2237   // If we can examine all elements of a vector constant successfully, we're
2238   // done (we can't do any better than that). If not, keep trying.
2239   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2240     return VecSignBits;
2241 
2242   APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2243   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2244 
2245   // If we know that the sign bit is either zero or one, determine the number of
2246   // identical bits in the top of the input value.
2247   if (KnownZero.isNegative())
2248     return std::max(FirstAnswer, KnownZero.countLeadingOnes());
2249 
2250   if (KnownOne.isNegative())
2251     return std::max(FirstAnswer, KnownOne.countLeadingOnes());
2252 
2253   // computeKnownBits gave us no extra information about the top bits.
2254   return FirstAnswer;
2255 }
2256 
2257 /// This function computes the integer multiple of Base that equals V.
2258 /// If successful, it returns true and returns the multiple in
2259 /// Multiple. If unsuccessful, it returns false. It looks
2260 /// through SExt instructions only if LookThroughSExt is true.
2261 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2262                            bool LookThroughSExt, unsigned Depth) {
2263   const unsigned MaxDepth = 6;
2264 
2265   assert(V && "No Value?");
2266   assert(Depth <= MaxDepth && "Limit Search Depth");
2267   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2268 
2269   Type *T = V->getType();
2270 
2271   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2272 
2273   if (Base == 0)
2274     return false;
2275 
2276   if (Base == 1) {
2277     Multiple = V;
2278     return true;
2279   }
2280 
2281   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2282   Constant *BaseVal = ConstantInt::get(T, Base);
2283   if (CO && CO == BaseVal) {
2284     // Multiple is 1.
2285     Multiple = ConstantInt::get(T, 1);
2286     return true;
2287   }
2288 
2289   if (CI && CI->getZExtValue() % Base == 0) {
2290     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2291     return true;
2292   }
2293 
2294   if (Depth == MaxDepth) return false;  // Limit search depth.
2295 
2296   Operator *I = dyn_cast<Operator>(V);
2297   if (!I) return false;
2298 
2299   switch (I->getOpcode()) {
2300   default: break;
2301   case Instruction::SExt:
2302     if (!LookThroughSExt) return false;
2303     // otherwise fall through to ZExt
2304   case Instruction::ZExt:
2305     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2306                            LookThroughSExt, Depth+1);
2307   case Instruction::Shl:
2308   case Instruction::Mul: {
2309     Value *Op0 = I->getOperand(0);
2310     Value *Op1 = I->getOperand(1);
2311 
2312     if (I->getOpcode() == Instruction::Shl) {
2313       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2314       if (!Op1CI) return false;
2315       // Turn Op0 << Op1 into Op0 * 2^Op1
2316       APInt Op1Int = Op1CI->getValue();
2317       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2318       APInt API(Op1Int.getBitWidth(), 0);
2319       API.setBit(BitToSet);
2320       Op1 = ConstantInt::get(V->getContext(), API);
2321     }
2322 
2323     Value *Mul0 = nullptr;
2324     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2325       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2326         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2327           if (Op1C->getType()->getPrimitiveSizeInBits() <
2328               MulC->getType()->getPrimitiveSizeInBits())
2329             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2330           if (Op1C->getType()->getPrimitiveSizeInBits() >
2331               MulC->getType()->getPrimitiveSizeInBits())
2332             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2333 
2334           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2335           Multiple = ConstantExpr::getMul(MulC, Op1C);
2336           return true;
2337         }
2338 
2339       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2340         if (Mul0CI->getValue() == 1) {
2341           // V == Base * Op1, so return Op1
2342           Multiple = Op1;
2343           return true;
2344         }
2345     }
2346 
2347     Value *Mul1 = nullptr;
2348     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2349       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2350         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2351           if (Op0C->getType()->getPrimitiveSizeInBits() <
2352               MulC->getType()->getPrimitiveSizeInBits())
2353             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2354           if (Op0C->getType()->getPrimitiveSizeInBits() >
2355               MulC->getType()->getPrimitiveSizeInBits())
2356             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2357 
2358           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2359           Multiple = ConstantExpr::getMul(MulC, Op0C);
2360           return true;
2361         }
2362 
2363       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2364         if (Mul1CI->getValue() == 1) {
2365           // V == Base * Op0, so return Op0
2366           Multiple = Op0;
2367           return true;
2368         }
2369     }
2370   }
2371   }
2372 
2373   // We could not determine if V is a multiple of Base.
2374   return false;
2375 }
2376 
2377 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2378                                             const TargetLibraryInfo *TLI) {
2379   const Function *F = ICS.getCalledFunction();
2380   if (!F)
2381     return Intrinsic::not_intrinsic;
2382 
2383   if (F->isIntrinsic())
2384     return F->getIntrinsicID();
2385 
2386   if (!TLI)
2387     return Intrinsic::not_intrinsic;
2388 
2389   LibFunc::Func Func;
2390   // We're going to make assumptions on the semantics of the functions, check
2391   // that the target knows that it's available in this environment and it does
2392   // not have local linkage.
2393   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2394     return Intrinsic::not_intrinsic;
2395 
2396   if (!ICS.onlyReadsMemory())
2397     return Intrinsic::not_intrinsic;
2398 
2399   // Otherwise check if we have a call to a function that can be turned into a
2400   // vector intrinsic.
2401   switch (Func) {
2402   default:
2403     break;
2404   case LibFunc::sin:
2405   case LibFunc::sinf:
2406   case LibFunc::sinl:
2407     return Intrinsic::sin;
2408   case LibFunc::cos:
2409   case LibFunc::cosf:
2410   case LibFunc::cosl:
2411     return Intrinsic::cos;
2412   case LibFunc::exp:
2413   case LibFunc::expf:
2414   case LibFunc::expl:
2415     return Intrinsic::exp;
2416   case LibFunc::exp2:
2417   case LibFunc::exp2f:
2418   case LibFunc::exp2l:
2419     return Intrinsic::exp2;
2420   case LibFunc::log:
2421   case LibFunc::logf:
2422   case LibFunc::logl:
2423     return Intrinsic::log;
2424   case LibFunc::log10:
2425   case LibFunc::log10f:
2426   case LibFunc::log10l:
2427     return Intrinsic::log10;
2428   case LibFunc::log2:
2429   case LibFunc::log2f:
2430   case LibFunc::log2l:
2431     return Intrinsic::log2;
2432   case LibFunc::fabs:
2433   case LibFunc::fabsf:
2434   case LibFunc::fabsl:
2435     return Intrinsic::fabs;
2436   case LibFunc::fmin:
2437   case LibFunc::fminf:
2438   case LibFunc::fminl:
2439     return Intrinsic::minnum;
2440   case LibFunc::fmax:
2441   case LibFunc::fmaxf:
2442   case LibFunc::fmaxl:
2443     return Intrinsic::maxnum;
2444   case LibFunc::copysign:
2445   case LibFunc::copysignf:
2446   case LibFunc::copysignl:
2447     return Intrinsic::copysign;
2448   case LibFunc::floor:
2449   case LibFunc::floorf:
2450   case LibFunc::floorl:
2451     return Intrinsic::floor;
2452   case LibFunc::ceil:
2453   case LibFunc::ceilf:
2454   case LibFunc::ceill:
2455     return Intrinsic::ceil;
2456   case LibFunc::trunc:
2457   case LibFunc::truncf:
2458   case LibFunc::truncl:
2459     return Intrinsic::trunc;
2460   case LibFunc::rint:
2461   case LibFunc::rintf:
2462   case LibFunc::rintl:
2463     return Intrinsic::rint;
2464   case LibFunc::nearbyint:
2465   case LibFunc::nearbyintf:
2466   case LibFunc::nearbyintl:
2467     return Intrinsic::nearbyint;
2468   case LibFunc::round:
2469   case LibFunc::roundf:
2470   case LibFunc::roundl:
2471     return Intrinsic::round;
2472   case LibFunc::pow:
2473   case LibFunc::powf:
2474   case LibFunc::powl:
2475     return Intrinsic::pow;
2476   case LibFunc::sqrt:
2477   case LibFunc::sqrtf:
2478   case LibFunc::sqrtl:
2479     if (ICS->hasNoNaNs())
2480       return Intrinsic::sqrt;
2481     return Intrinsic::not_intrinsic;
2482   }
2483 
2484   return Intrinsic::not_intrinsic;
2485 }
2486 
2487 /// Return true if we can prove that the specified FP value is never equal to
2488 /// -0.0.
2489 ///
2490 /// NOTE: this function will need to be revisited when we support non-default
2491 /// rounding modes!
2492 ///
2493 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2494                                 unsigned Depth) {
2495   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2496     return !CFP->getValueAPF().isNegZero();
2497 
2498   // FIXME: Magic number! At the least, this should be given a name because it's
2499   // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
2500   // expose it as a parameter, so it can be used for testing / experimenting.
2501   if (Depth == 6)
2502     return false;  // Limit search depth.
2503 
2504   const Operator *I = dyn_cast<Operator>(V);
2505   if (!I) return false;
2506 
2507   // Check if the nsz fast-math flag is set
2508   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2509     if (FPO->hasNoSignedZeros())
2510       return true;
2511 
2512   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2513   if (I->getOpcode() == Instruction::FAdd)
2514     if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2515       if (CFP->isNullValue())
2516         return true;
2517 
2518   // sitofp and uitofp turn into +0.0 for zero.
2519   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2520     return true;
2521 
2522   if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2523     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2524     switch (IID) {
2525     default:
2526       break;
2527     // sqrt(-0.0) = -0.0, no other negative results are possible.
2528     case Intrinsic::sqrt:
2529       return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2530     // fabs(x) != -0.0
2531     case Intrinsic::fabs:
2532       return true;
2533     }
2534   }
2535 
2536   return false;
2537 }
2538 
2539 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2540                                        const TargetLibraryInfo *TLI,
2541                                        unsigned Depth) {
2542   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2543     return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
2544 
2545   // FIXME: Magic number! At the least, this should be given a name because it's
2546   // used similarly in CannotBeNegativeZero(). A better fix may be to
2547   // expose it as a parameter, so it can be used for testing / experimenting.
2548   if (Depth == 6)
2549     return false;  // Limit search depth.
2550 
2551   const Operator *I = dyn_cast<Operator>(V);
2552   if (!I) return false;
2553 
2554   switch (I->getOpcode()) {
2555   default: break;
2556   // Unsigned integers are always nonnegative.
2557   case Instruction::UIToFP:
2558     return true;
2559   case Instruction::FMul:
2560     // x*x is always non-negative or a NaN.
2561     if (I->getOperand(0) == I->getOperand(1))
2562       return true;
2563     LLVM_FALLTHROUGH;
2564   case Instruction::FAdd:
2565   case Instruction::FDiv:
2566   case Instruction::FRem:
2567     return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2568            CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2569   case Instruction::Select:
2570     return CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1) &&
2571            CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2572   case Instruction::FPExt:
2573   case Instruction::FPTrunc:
2574     // Widening/narrowing never change sign.
2575     return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2576   case Instruction::Call:
2577     Intrinsic::ID IID = getIntrinsicForCallSite(cast<CallInst>(I), TLI);
2578     switch (IID) {
2579     default:
2580       break;
2581     case Intrinsic::maxnum:
2582       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) ||
2583              CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2584     case Intrinsic::minnum:
2585       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
2586              CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
2587     case Intrinsic::exp:
2588     case Intrinsic::exp2:
2589     case Intrinsic::fabs:
2590     case Intrinsic::sqrt:
2591       return true;
2592     case Intrinsic::powi:
2593       if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2594         // powi(x,n) is non-negative if n is even.
2595         if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2596           return true;
2597       }
2598       return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
2599     case Intrinsic::fma:
2600     case Intrinsic::fmuladd:
2601       // x*x+y is non-negative if y is non-negative.
2602       return I->getOperand(0) == I->getOperand(1) &&
2603              CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
2604     }
2605     break;
2606   }
2607   return false;
2608 }
2609 
2610 /// If the specified value can be set by repeating the same byte in memory,
2611 /// return the i8 value that it is represented with.  This is
2612 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2613 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2614 /// byte store (e.g. i16 0x1234), return null.
2615 Value *llvm::isBytewiseValue(Value *V) {
2616   // All byte-wide stores are splatable, even of arbitrary variables.
2617   if (V->getType()->isIntegerTy(8)) return V;
2618 
2619   // Handle 'null' ConstantArrayZero etc.
2620   if (Constant *C = dyn_cast<Constant>(V))
2621     if (C->isNullValue())
2622       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2623 
2624   // Constant float and double values can be handled as integer values if the
2625   // corresponding integer value is "byteable".  An important case is 0.0.
2626   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2627     if (CFP->getType()->isFloatTy())
2628       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2629     if (CFP->getType()->isDoubleTy())
2630       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2631     // Don't handle long double formats, which have strange constraints.
2632   }
2633 
2634   // We can handle constant integers that are multiple of 8 bits.
2635   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2636     if (CI->getBitWidth() % 8 == 0) {
2637       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2638 
2639       if (!CI->getValue().isSplat(8))
2640         return nullptr;
2641       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2642     }
2643   }
2644 
2645   // A ConstantDataArray/Vector is splatable if all its members are equal and
2646   // also splatable.
2647   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2648     Value *Elt = CA->getElementAsConstant(0);
2649     Value *Val = isBytewiseValue(Elt);
2650     if (!Val)
2651       return nullptr;
2652 
2653     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2654       if (CA->getElementAsConstant(I) != Elt)
2655         return nullptr;
2656 
2657     return Val;
2658   }
2659 
2660   // Conceptually, we could handle things like:
2661   //   %a = zext i8 %X to i16
2662   //   %b = shl i16 %a, 8
2663   //   %c = or i16 %a, %b
2664   // but until there is an example that actually needs this, it doesn't seem
2665   // worth worrying about.
2666   return nullptr;
2667 }
2668 
2669 
2670 // This is the recursive version of BuildSubAggregate. It takes a few different
2671 // arguments. Idxs is the index within the nested struct From that we are
2672 // looking at now (which is of type IndexedType). IdxSkip is the number of
2673 // indices from Idxs that should be left out when inserting into the resulting
2674 // struct. To is the result struct built so far, new insertvalue instructions
2675 // build on that.
2676 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2677                                 SmallVectorImpl<unsigned> &Idxs,
2678                                 unsigned IdxSkip,
2679                                 Instruction *InsertBefore) {
2680   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2681   if (STy) {
2682     // Save the original To argument so we can modify it
2683     Value *OrigTo = To;
2684     // General case, the type indexed by Idxs is a struct
2685     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2686       // Process each struct element recursively
2687       Idxs.push_back(i);
2688       Value *PrevTo = To;
2689       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2690                              InsertBefore);
2691       Idxs.pop_back();
2692       if (!To) {
2693         // Couldn't find any inserted value for this index? Cleanup
2694         while (PrevTo != OrigTo) {
2695           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2696           PrevTo = Del->getAggregateOperand();
2697           Del->eraseFromParent();
2698         }
2699         // Stop processing elements
2700         break;
2701       }
2702     }
2703     // If we successfully found a value for each of our subaggregates
2704     if (To)
2705       return To;
2706   }
2707   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2708   // the struct's elements had a value that was inserted directly. In the latter
2709   // case, perhaps we can't determine each of the subelements individually, but
2710   // we might be able to find the complete struct somewhere.
2711 
2712   // Find the value that is at that particular spot
2713   Value *V = FindInsertedValue(From, Idxs);
2714 
2715   if (!V)
2716     return nullptr;
2717 
2718   // Insert the value in the new (sub) aggregrate
2719   return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2720                                        "tmp", InsertBefore);
2721 }
2722 
2723 // This helper takes a nested struct and extracts a part of it (which is again a
2724 // struct) into a new value. For example, given the struct:
2725 // { a, { b, { c, d }, e } }
2726 // and the indices "1, 1" this returns
2727 // { c, d }.
2728 //
2729 // It does this by inserting an insertvalue for each element in the resulting
2730 // struct, as opposed to just inserting a single struct. This will only work if
2731 // each of the elements of the substruct are known (ie, inserted into From by an
2732 // insertvalue instruction somewhere).
2733 //
2734 // All inserted insertvalue instructions are inserted before InsertBefore
2735 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2736                                 Instruction *InsertBefore) {
2737   assert(InsertBefore && "Must have someplace to insert!");
2738   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2739                                                              idx_range);
2740   Value *To = UndefValue::get(IndexedType);
2741   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2742   unsigned IdxSkip = Idxs.size();
2743 
2744   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2745 }
2746 
2747 /// Given an aggregrate and an sequence of indices, see if
2748 /// the scalar value indexed is already around as a register, for example if it
2749 /// were inserted directly into the aggregrate.
2750 ///
2751 /// If InsertBefore is not null, this function will duplicate (modified)
2752 /// insertvalues when a part of a nested struct is extracted.
2753 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2754                                Instruction *InsertBefore) {
2755   // Nothing to index? Just return V then (this is useful at the end of our
2756   // recursion).
2757   if (idx_range.empty())
2758     return V;
2759   // We have indices, so V should have an indexable type.
2760   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2761          "Not looking at a struct or array?");
2762   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2763          "Invalid indices for type?");
2764 
2765   if (Constant *C = dyn_cast<Constant>(V)) {
2766     C = C->getAggregateElement(idx_range[0]);
2767     if (!C) return nullptr;
2768     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2769   }
2770 
2771   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2772     // Loop the indices for the insertvalue instruction in parallel with the
2773     // requested indices
2774     const unsigned *req_idx = idx_range.begin();
2775     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2776          i != e; ++i, ++req_idx) {
2777       if (req_idx == idx_range.end()) {
2778         // We can't handle this without inserting insertvalues
2779         if (!InsertBefore)
2780           return nullptr;
2781 
2782         // The requested index identifies a part of a nested aggregate. Handle
2783         // this specially. For example,
2784         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2785         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2786         // %C = extractvalue {i32, { i32, i32 } } %B, 1
2787         // This can be changed into
2788         // %A = insertvalue {i32, i32 } undef, i32 10, 0
2789         // %C = insertvalue {i32, i32 } %A, i32 11, 1
2790         // which allows the unused 0,0 element from the nested struct to be
2791         // removed.
2792         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2793                                  InsertBefore);
2794       }
2795 
2796       // This insert value inserts something else than what we are looking for.
2797       // See if the (aggregate) value inserted into has the value we are
2798       // looking for, then.
2799       if (*req_idx != *i)
2800         return FindInsertedValue(I->getAggregateOperand(), idx_range,
2801                                  InsertBefore);
2802     }
2803     // If we end up here, the indices of the insertvalue match with those
2804     // requested (though possibly only partially). Now we recursively look at
2805     // the inserted value, passing any remaining indices.
2806     return FindInsertedValue(I->getInsertedValueOperand(),
2807                              makeArrayRef(req_idx, idx_range.end()),
2808                              InsertBefore);
2809   }
2810 
2811   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2812     // If we're extracting a value from an aggregate that was extracted from
2813     // something else, we can extract from that something else directly instead.
2814     // However, we will need to chain I's indices with the requested indices.
2815 
2816     // Calculate the number of indices required
2817     unsigned size = I->getNumIndices() + idx_range.size();
2818     // Allocate some space to put the new indices in
2819     SmallVector<unsigned, 5> Idxs;
2820     Idxs.reserve(size);
2821     // Add indices from the extract value instruction
2822     Idxs.append(I->idx_begin(), I->idx_end());
2823 
2824     // Add requested indices
2825     Idxs.append(idx_range.begin(), idx_range.end());
2826 
2827     assert(Idxs.size() == size
2828            && "Number of indices added not correct?");
2829 
2830     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2831   }
2832   // Otherwise, we don't know (such as, extracting from a function return value
2833   // or load instruction)
2834   return nullptr;
2835 }
2836 
2837 /// Analyze the specified pointer to see if it can be expressed as a base
2838 /// pointer plus a constant offset. Return the base and offset to the caller.
2839 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2840                                               const DataLayout &DL) {
2841   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2842   APInt ByteOffset(BitWidth, 0);
2843 
2844   // We walk up the defs but use a visited set to handle unreachable code. In
2845   // that case, we stop after accumulating the cycle once (not that it
2846   // matters).
2847   SmallPtrSet<Value *, 16> Visited;
2848   while (Visited.insert(Ptr).second) {
2849     if (Ptr->getType()->isVectorTy())
2850       break;
2851 
2852     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2853       APInt GEPOffset(BitWidth, 0);
2854       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2855         break;
2856 
2857       ByteOffset += GEPOffset;
2858 
2859       Ptr = GEP->getPointerOperand();
2860     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2861                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2862       Ptr = cast<Operator>(Ptr)->getOperand(0);
2863     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2864       if (GA->isInterposable())
2865         break;
2866       Ptr = GA->getAliasee();
2867     } else {
2868       break;
2869     }
2870   }
2871   Offset = ByteOffset.getSExtValue();
2872   return Ptr;
2873 }
2874 
2875 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
2876   // Make sure the GEP has exactly three arguments.
2877   if (GEP->getNumOperands() != 3)
2878     return false;
2879 
2880   // Make sure the index-ee is a pointer to array of i8.
2881   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
2882   if (!AT || !AT->getElementType()->isIntegerTy(8))
2883     return false;
2884 
2885   // Check to make sure that the first operand of the GEP is an integer and
2886   // has value 0 so that we are sure we're indexing into the initializer.
2887   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2888   if (!FirstIdx || !FirstIdx->isZero())
2889     return false;
2890 
2891   return true;
2892 }
2893 
2894 /// This function computes the length of a null-terminated C string pointed to
2895 /// by V. If successful, it returns true and returns the string in Str.
2896 /// If unsuccessful, it returns false.
2897 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2898                                  uint64_t Offset, bool TrimAtNul) {
2899   assert(V);
2900 
2901   // Look through bitcast instructions and geps.
2902   V = V->stripPointerCasts();
2903 
2904   // If the value is a GEP instruction or constant expression, treat it as an
2905   // offset.
2906   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2907     // The GEP operator should be based on a pointer to string constant, and is
2908     // indexing into the string constant.
2909     if (!isGEPBasedOnPointerToString(GEP))
2910       return false;
2911 
2912     // If the second index isn't a ConstantInt, then this is a variable index
2913     // into the array.  If this occurs, we can't say anything meaningful about
2914     // the string.
2915     uint64_t StartIdx = 0;
2916     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
2917       StartIdx = CI->getZExtValue();
2918     else
2919       return false;
2920     return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
2921                                  TrimAtNul);
2922   }
2923 
2924   // The GEP instruction, constant or instruction, must reference a global
2925   // variable that is a constant and is initialized. The referenced constant
2926   // initializer is the array that we'll use for optimization.
2927   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2928   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
2929     return false;
2930 
2931   // Handle the all-zeros case.
2932   if (GV->getInitializer()->isNullValue()) {
2933     // This is a degenerate case. The initializer is constant zero so the
2934     // length of the string must be zero.
2935     Str = "";
2936     return true;
2937   }
2938 
2939   // This must be a ConstantDataArray.
2940   const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
2941   if (!Array || !Array->isString())
2942     return false;
2943 
2944   // Get the number of elements in the array.
2945   uint64_t NumElts = Array->getType()->getArrayNumElements();
2946 
2947   // Start out with the entire array in the StringRef.
2948   Str = Array->getAsString();
2949 
2950   if (Offset > NumElts)
2951     return false;
2952 
2953   // Skip over 'offset' bytes.
2954   Str = Str.substr(Offset);
2955 
2956   if (TrimAtNul) {
2957     // Trim off the \0 and anything after it.  If the array is not nul
2958     // terminated, we just return the whole end of string.  The client may know
2959     // some other way that the string is length-bound.
2960     Str = Str.substr(0, Str.find('\0'));
2961   }
2962   return true;
2963 }
2964 
2965 // These next two are very similar to the above, but also look through PHI
2966 // nodes.
2967 // TODO: See if we can integrate these two together.
2968 
2969 /// If we can compute the length of the string pointed to by
2970 /// the specified pointer, return 'len+1'.  If we can't, return 0.
2971 static uint64_t GetStringLengthH(const Value *V,
2972                                  SmallPtrSetImpl<const PHINode*> &PHIs) {
2973   // Look through noop bitcast instructions.
2974   V = V->stripPointerCasts();
2975 
2976   // If this is a PHI node, there are two cases: either we have already seen it
2977   // or we haven't.
2978   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2979     if (!PHIs.insert(PN).second)
2980       return ~0ULL;  // already in the set.
2981 
2982     // If it was new, see if all the input strings are the same length.
2983     uint64_t LenSoFar = ~0ULL;
2984     for (Value *IncValue : PN->incoming_values()) {
2985       uint64_t Len = GetStringLengthH(IncValue, PHIs);
2986       if (Len == 0) return 0; // Unknown length -> unknown.
2987 
2988       if (Len == ~0ULL) continue;
2989 
2990       if (Len != LenSoFar && LenSoFar != ~0ULL)
2991         return 0;    // Disagree -> unknown.
2992       LenSoFar = Len;
2993     }
2994 
2995     // Success, all agree.
2996     return LenSoFar;
2997   }
2998 
2999   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3000   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3001     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
3002     if (Len1 == 0) return 0;
3003     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
3004     if (Len2 == 0) return 0;
3005     if (Len1 == ~0ULL) return Len2;
3006     if (Len2 == ~0ULL) return Len1;
3007     if (Len1 != Len2) return 0;
3008     return Len1;
3009   }
3010 
3011   // Otherwise, see if we can read the string.
3012   StringRef StrData;
3013   if (!getConstantStringInfo(V, StrData))
3014     return 0;
3015 
3016   return StrData.size()+1;
3017 }
3018 
3019 /// If we can compute the length of the string pointed to by
3020 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3021 uint64_t llvm::GetStringLength(const Value *V) {
3022   if (!V->getType()->isPointerTy()) return 0;
3023 
3024   SmallPtrSet<const PHINode*, 32> PHIs;
3025   uint64_t Len = GetStringLengthH(V, PHIs);
3026   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3027   // an empty string as a length.
3028   return Len == ~0ULL ? 1 : Len;
3029 }
3030 
3031 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
3032 /// previous iteration of the loop was referring to the same object as \p PN.
3033 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3034                                          const LoopInfo *LI) {
3035   // Find the loop-defined value.
3036   Loop *L = LI->getLoopFor(PN->getParent());
3037   if (PN->getNumIncomingValues() != 2)
3038     return true;
3039 
3040   // Find the value from previous iteration.
3041   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3042   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3043     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3044   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3045     return true;
3046 
3047   // If a new pointer is loaded in the loop, the pointer references a different
3048   // object in every iteration.  E.g.:
3049   //    for (i)
3050   //       int *p = a[i];
3051   //       ...
3052   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3053     if (!L->isLoopInvariant(Load->getPointerOperand()))
3054       return false;
3055   return true;
3056 }
3057 
3058 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3059                                  unsigned MaxLookup) {
3060   if (!V->getType()->isPointerTy())
3061     return V;
3062   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3063     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3064       V = GEP->getPointerOperand();
3065     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3066                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3067       V = cast<Operator>(V)->getOperand(0);
3068     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3069       if (GA->isInterposable())
3070         return V;
3071       V = GA->getAliasee();
3072     } else {
3073       if (auto CS = CallSite(V))
3074         if (Value *RV = CS.getReturnedArgOperand()) {
3075           V = RV;
3076           continue;
3077         }
3078 
3079       // See if InstructionSimplify knows any relevant tricks.
3080       if (Instruction *I = dyn_cast<Instruction>(V))
3081         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3082         if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
3083           V = Simplified;
3084           continue;
3085         }
3086 
3087       return V;
3088     }
3089     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3090   }
3091   return V;
3092 }
3093 
3094 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3095                                 const DataLayout &DL, LoopInfo *LI,
3096                                 unsigned MaxLookup) {
3097   SmallPtrSet<Value *, 4> Visited;
3098   SmallVector<Value *, 4> Worklist;
3099   Worklist.push_back(V);
3100   do {
3101     Value *P = Worklist.pop_back_val();
3102     P = GetUnderlyingObject(P, DL, MaxLookup);
3103 
3104     if (!Visited.insert(P).second)
3105       continue;
3106 
3107     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3108       Worklist.push_back(SI->getTrueValue());
3109       Worklist.push_back(SI->getFalseValue());
3110       continue;
3111     }
3112 
3113     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3114       // If this PHI changes the underlying object in every iteration of the
3115       // loop, don't look through it.  Consider:
3116       //   int **A;
3117       //   for (i) {
3118       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3119       //     Curr = A[i];
3120       //     *Prev, *Curr;
3121       //
3122       // Prev is tracking Curr one iteration behind so they refer to different
3123       // underlying objects.
3124       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3125           isSameUnderlyingObjectInLoop(PN, LI))
3126         for (Value *IncValue : PN->incoming_values())
3127           Worklist.push_back(IncValue);
3128       continue;
3129     }
3130 
3131     Objects.push_back(P);
3132   } while (!Worklist.empty());
3133 }
3134 
3135 /// Return true if the only users of this pointer are lifetime markers.
3136 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3137   for (const User *U : V->users()) {
3138     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3139     if (!II) return false;
3140 
3141     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3142         II->getIntrinsicID() != Intrinsic::lifetime_end)
3143       return false;
3144   }
3145   return true;
3146 }
3147 
3148 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3149                                         const Instruction *CtxI,
3150                                         const DominatorTree *DT) {
3151   const Operator *Inst = dyn_cast<Operator>(V);
3152   if (!Inst)
3153     return false;
3154 
3155   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3156     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3157       if (C->canTrap())
3158         return false;
3159 
3160   switch (Inst->getOpcode()) {
3161   default:
3162     return true;
3163   case Instruction::UDiv:
3164   case Instruction::URem: {
3165     // x / y is undefined if y == 0.
3166     const APInt *V;
3167     if (match(Inst->getOperand(1), m_APInt(V)))
3168       return *V != 0;
3169     return false;
3170   }
3171   case Instruction::SDiv:
3172   case Instruction::SRem: {
3173     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3174     const APInt *Numerator, *Denominator;
3175     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3176       return false;
3177     // We cannot hoist this division if the denominator is 0.
3178     if (*Denominator == 0)
3179       return false;
3180     // It's safe to hoist if the denominator is not 0 or -1.
3181     if (*Denominator != -1)
3182       return true;
3183     // At this point we know that the denominator is -1.  It is safe to hoist as
3184     // long we know that the numerator is not INT_MIN.
3185     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3186       return !Numerator->isMinSignedValue();
3187     // The numerator *might* be MinSignedValue.
3188     return false;
3189   }
3190   case Instruction::Load: {
3191     const LoadInst *LI = cast<LoadInst>(Inst);
3192     if (!LI->isUnordered() ||
3193         // Speculative load may create a race that did not exist in the source.
3194         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3195         // Speculative load may load data from dirty regions.
3196         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
3197       return false;
3198     const DataLayout &DL = LI->getModule()->getDataLayout();
3199     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3200                                               LI->getAlignment(), DL, CtxI, DT);
3201   }
3202   case Instruction::Call: {
3203     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
3204       switch (II->getIntrinsicID()) {
3205       // These synthetic intrinsics have no side-effects and just mark
3206       // information about their operands.
3207       // FIXME: There are other no-op synthetic instructions that potentially
3208       // should be considered at least *safe* to speculate...
3209       case Intrinsic::dbg_declare:
3210       case Intrinsic::dbg_value:
3211         return true;
3212 
3213       case Intrinsic::bswap:
3214       case Intrinsic::ctlz:
3215       case Intrinsic::ctpop:
3216       case Intrinsic::cttz:
3217       case Intrinsic::objectsize:
3218       case Intrinsic::sadd_with_overflow:
3219       case Intrinsic::smul_with_overflow:
3220       case Intrinsic::ssub_with_overflow:
3221       case Intrinsic::uadd_with_overflow:
3222       case Intrinsic::umul_with_overflow:
3223       case Intrinsic::usub_with_overflow:
3224         return true;
3225       // These intrinsics are defined to have the same behavior as libm
3226       // functions except for setting errno.
3227       case Intrinsic::sqrt:
3228       case Intrinsic::fma:
3229       case Intrinsic::fmuladd:
3230         return true;
3231       // These intrinsics are defined to have the same behavior as libm
3232       // functions, and the corresponding libm functions never set errno.
3233       case Intrinsic::trunc:
3234       case Intrinsic::copysign:
3235       case Intrinsic::fabs:
3236       case Intrinsic::minnum:
3237       case Intrinsic::maxnum:
3238         return true;
3239       // These intrinsics are defined to have the same behavior as libm
3240       // functions, which never overflow when operating on the IEEE754 types
3241       // that we support, and never set errno otherwise.
3242       case Intrinsic::ceil:
3243       case Intrinsic::floor:
3244       case Intrinsic::nearbyint:
3245       case Intrinsic::rint:
3246       case Intrinsic::round:
3247         return true;
3248       // TODO: are convert_{from,to}_fp16 safe?
3249       // TODO: can we list target-specific intrinsics here?
3250       default: break;
3251       }
3252     }
3253     return false; // The called function could have undefined behavior or
3254                   // side-effects, even if marked readnone nounwind.
3255   }
3256   case Instruction::VAArg:
3257   case Instruction::Alloca:
3258   case Instruction::Invoke:
3259   case Instruction::PHI:
3260   case Instruction::Store:
3261   case Instruction::Ret:
3262   case Instruction::Br:
3263   case Instruction::IndirectBr:
3264   case Instruction::Switch:
3265   case Instruction::Unreachable:
3266   case Instruction::Fence:
3267   case Instruction::AtomicRMW:
3268   case Instruction::AtomicCmpXchg:
3269   case Instruction::LandingPad:
3270   case Instruction::Resume:
3271   case Instruction::CatchSwitch:
3272   case Instruction::CatchPad:
3273   case Instruction::CatchRet:
3274   case Instruction::CleanupPad:
3275   case Instruction::CleanupRet:
3276     return false; // Misc instructions which have effects
3277   }
3278 }
3279 
3280 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3281   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3282 }
3283 
3284 /// Return true if we know that the specified value is never null.
3285 bool llvm::isKnownNonNull(const Value *V) {
3286   assert(V->getType()->isPointerTy() && "V must be pointer type");
3287 
3288   // Alloca never returns null, malloc might.
3289   if (isa<AllocaInst>(V)) return true;
3290 
3291   // A byval, inalloca, or nonnull argument is never null.
3292   if (const Argument *A = dyn_cast<Argument>(V))
3293     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3294 
3295   // A global variable in address space 0 is non null unless extern weak.
3296   // Other address spaces may have null as a valid address for a global,
3297   // so we can't assume anything.
3298   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3299     return !GV->hasExternalWeakLinkage() &&
3300            GV->getType()->getAddressSpace() == 0;
3301 
3302   // A Load tagged with nonnull metadata is never null.
3303   if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3304     return LI->getMetadata(LLVMContext::MD_nonnull);
3305 
3306   if (auto CS = ImmutableCallSite(V))
3307     if (CS.isReturnNonNull())
3308       return true;
3309 
3310   return false;
3311 }
3312 
3313 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3314                                                   const Instruction *CtxI,
3315                                                   const DominatorTree *DT) {
3316   assert(V->getType()->isPointerTy() && "V must be pointer type");
3317 
3318   unsigned NumUsesExplored = 0;
3319   for (auto *U : V->users()) {
3320     // Avoid massive lists
3321     if (NumUsesExplored >= DomConditionsMaxUses)
3322       break;
3323     NumUsesExplored++;
3324     // Consider only compare instructions uniquely controlling a branch
3325     CmpInst::Predicate Pred;
3326     if (!match(const_cast<User *>(U),
3327                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
3328         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
3329       continue;
3330 
3331     for (auto *CmpU : U->users()) {
3332       if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
3333         assert(BI->isConditional() && "uses a comparison!");
3334 
3335         BasicBlock *NonNullSuccessor =
3336             BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
3337         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3338         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3339           return true;
3340       } else if (Pred == ICmpInst::ICMP_NE &&
3341                  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
3342                  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
3343         return true;
3344       }
3345     }
3346   }
3347 
3348   return false;
3349 }
3350 
3351 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3352                             const DominatorTree *DT) {
3353   if (isKnownNonNull(V))
3354     return true;
3355 
3356   return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
3357 }
3358 
3359 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3360                                                    const Value *RHS,
3361                                                    const DataLayout &DL,
3362                                                    AssumptionCache *AC,
3363                                                    const Instruction *CxtI,
3364                                                    const DominatorTree *DT) {
3365   // Multiplying n * m significant bits yields a result of n + m significant
3366   // bits. If the total number of significant bits does not exceed the
3367   // result bit width (minus 1), there is no overflow.
3368   // This means if we have enough leading zero bits in the operands
3369   // we can guarantee that the result does not overflow.
3370   // Ref: "Hacker's Delight" by Henry Warren
3371   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3372   APInt LHSKnownZero(BitWidth, 0);
3373   APInt LHSKnownOne(BitWidth, 0);
3374   APInt RHSKnownZero(BitWidth, 0);
3375   APInt RHSKnownOne(BitWidth, 0);
3376   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3377                    DT);
3378   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3379                    DT);
3380   // Note that underestimating the number of zero bits gives a more
3381   // conservative answer.
3382   unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3383                       RHSKnownZero.countLeadingOnes();
3384   // First handle the easy case: if we have enough zero bits there's
3385   // definitely no overflow.
3386   if (ZeroBits >= BitWidth)
3387     return OverflowResult::NeverOverflows;
3388 
3389   // Get the largest possible values for each operand.
3390   APInt LHSMax = ~LHSKnownZero;
3391   APInt RHSMax = ~RHSKnownZero;
3392 
3393   // We know the multiply operation doesn't overflow if the maximum values for
3394   // each operand will not overflow after we multiply them together.
3395   bool MaxOverflow;
3396   LHSMax.umul_ov(RHSMax, MaxOverflow);
3397   if (!MaxOverflow)
3398     return OverflowResult::NeverOverflows;
3399 
3400   // We know it always overflows if multiplying the smallest possible values for
3401   // the operands also results in overflow.
3402   bool MinOverflow;
3403   LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3404   if (MinOverflow)
3405     return OverflowResult::AlwaysOverflows;
3406 
3407   return OverflowResult::MayOverflow;
3408 }
3409 
3410 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3411                                                    const Value *RHS,
3412                                                    const DataLayout &DL,
3413                                                    AssumptionCache *AC,
3414                                                    const Instruction *CxtI,
3415                                                    const DominatorTree *DT) {
3416   bool LHSKnownNonNegative, LHSKnownNegative;
3417   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3418                  AC, CxtI, DT);
3419   if (LHSKnownNonNegative || LHSKnownNegative) {
3420     bool RHSKnownNonNegative, RHSKnownNegative;
3421     ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3422                    AC, CxtI, DT);
3423 
3424     if (LHSKnownNegative && RHSKnownNegative) {
3425       // The sign bit is set in both cases: this MUST overflow.
3426       // Create a simple add instruction, and insert it into the struct.
3427       return OverflowResult::AlwaysOverflows;
3428     }
3429 
3430     if (LHSKnownNonNegative && RHSKnownNonNegative) {
3431       // The sign bit is clear in both cases: this CANNOT overflow.
3432       // Create a simple add instruction, and insert it into the struct.
3433       return OverflowResult::NeverOverflows;
3434     }
3435   }
3436 
3437   return OverflowResult::MayOverflow;
3438 }
3439 
3440 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3441                                                   const Value *RHS,
3442                                                   const AddOperator *Add,
3443                                                   const DataLayout &DL,
3444                                                   AssumptionCache *AC,
3445                                                   const Instruction *CxtI,
3446                                                   const DominatorTree *DT) {
3447   if (Add && Add->hasNoSignedWrap()) {
3448     return OverflowResult::NeverOverflows;
3449   }
3450 
3451   bool LHSKnownNonNegative, LHSKnownNegative;
3452   bool RHSKnownNonNegative, RHSKnownNegative;
3453   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3454                  AC, CxtI, DT);
3455   ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3456                  AC, CxtI, DT);
3457 
3458   if ((LHSKnownNonNegative && RHSKnownNegative) ||
3459       (LHSKnownNegative && RHSKnownNonNegative)) {
3460     // The sign bits are opposite: this CANNOT overflow.
3461     return OverflowResult::NeverOverflows;
3462   }
3463 
3464   // The remaining code needs Add to be available. Early returns if not so.
3465   if (!Add)
3466     return OverflowResult::MayOverflow;
3467 
3468   // If the sign of Add is the same as at least one of the operands, this add
3469   // CANNOT overflow. This is particularly useful when the sum is
3470   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3471   // operands.
3472   bool LHSOrRHSKnownNonNegative =
3473       (LHSKnownNonNegative || RHSKnownNonNegative);
3474   bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3475   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3476     bool AddKnownNonNegative, AddKnownNegative;
3477     ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3478                    /*Depth=*/0, AC, CxtI, DT);
3479     if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3480         (AddKnownNegative && LHSOrRHSKnownNegative)) {
3481       return OverflowResult::NeverOverflows;
3482     }
3483   }
3484 
3485   return OverflowResult::MayOverflow;
3486 }
3487 
3488 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3489                                      const DominatorTree &DT) {
3490 #ifndef NDEBUG
3491   auto IID = II->getIntrinsicID();
3492   assert((IID == Intrinsic::sadd_with_overflow ||
3493           IID == Intrinsic::uadd_with_overflow ||
3494           IID == Intrinsic::ssub_with_overflow ||
3495           IID == Intrinsic::usub_with_overflow ||
3496           IID == Intrinsic::smul_with_overflow ||
3497           IID == Intrinsic::umul_with_overflow) &&
3498          "Not an overflow intrinsic!");
3499 #endif
3500 
3501   SmallVector<const BranchInst *, 2> GuardingBranches;
3502   SmallVector<const ExtractValueInst *, 2> Results;
3503 
3504   for (const User *U : II->users()) {
3505     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3506       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3507 
3508       if (EVI->getIndices()[0] == 0)
3509         Results.push_back(EVI);
3510       else {
3511         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3512 
3513         for (const auto *U : EVI->users())
3514           if (const auto *B = dyn_cast<BranchInst>(U)) {
3515             assert(B->isConditional() && "How else is it using an i1?");
3516             GuardingBranches.push_back(B);
3517           }
3518       }
3519     } else {
3520       // We are using the aggregate directly in a way we don't want to analyze
3521       // here (storing it to a global, say).
3522       return false;
3523     }
3524   }
3525 
3526   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3527     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3528     if (!NoWrapEdge.isSingleEdge())
3529       return false;
3530 
3531     // Check if all users of the add are provably no-wrap.
3532     for (const auto *Result : Results) {
3533       // If the extractvalue itself is not executed on overflow, the we don't
3534       // need to check each use separately, since domination is transitive.
3535       if (DT.dominates(NoWrapEdge, Result->getParent()))
3536         continue;
3537 
3538       for (auto &RU : Result->uses())
3539         if (!DT.dominates(NoWrapEdge, RU))
3540           return false;
3541     }
3542 
3543     return true;
3544   };
3545 
3546   return any_of(GuardingBranches, AllUsesGuardedByBranch);
3547 }
3548 
3549 
3550 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3551                                                  const DataLayout &DL,
3552                                                  AssumptionCache *AC,
3553                                                  const Instruction *CxtI,
3554                                                  const DominatorTree *DT) {
3555   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3556                                        Add, DL, AC, CxtI, DT);
3557 }
3558 
3559 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3560                                                  const Value *RHS,
3561                                                  const DataLayout &DL,
3562                                                  AssumptionCache *AC,
3563                                                  const Instruction *CxtI,
3564                                                  const DominatorTree *DT) {
3565   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3566 }
3567 
3568 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3569   // A memory operation returns normally if it isn't volatile. A volatile
3570   // operation is allowed to trap.
3571   //
3572   // An atomic operation isn't guaranteed to return in a reasonable amount of
3573   // time because it's possible for another thread to interfere with it for an
3574   // arbitrary length of time, but programs aren't allowed to rely on that.
3575   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3576     return !LI->isVolatile();
3577   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3578     return !SI->isVolatile();
3579   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3580     return !CXI->isVolatile();
3581   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3582     return !RMWI->isVolatile();
3583   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3584     return !MII->isVolatile();
3585 
3586   // If there is no successor, then execution can't transfer to it.
3587   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3588     return !CRI->unwindsToCaller();
3589   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3590     return !CatchSwitch->unwindsToCaller();
3591   if (isa<ResumeInst>(I))
3592     return false;
3593   if (isa<ReturnInst>(I))
3594     return false;
3595 
3596   // Calls can throw, or contain an infinite loop, or kill the process.
3597   if (CallSite CS = CallSite(const_cast<Instruction*>(I))) {
3598     // Calls which don't write to arbitrary memory are safe.
3599     // FIXME: Ignoring infinite loops without any side-effects is too aggressive,
3600     // but it's consistent with other passes. See http://llvm.org/PR965 .
3601     // FIXME: This isn't aggressive enough; a call which only writes to a
3602     // global is guaranteed to return.
3603     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3604            match(I, m_Intrinsic<Intrinsic::assume>());
3605   }
3606 
3607   // Other instructions return normally.
3608   return true;
3609 }
3610 
3611 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3612                                                   const Loop *L) {
3613   // The loop header is guaranteed to be executed for every iteration.
3614   //
3615   // FIXME: Relax this constraint to cover all basic blocks that are
3616   // guaranteed to be executed at every iteration.
3617   if (I->getParent() != L->getHeader()) return false;
3618 
3619   for (const Instruction &LI : *L->getHeader()) {
3620     if (&LI == I) return true;
3621     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3622   }
3623   llvm_unreachable("Instruction not contained in its own parent basic block.");
3624 }
3625 
3626 bool llvm::propagatesFullPoison(const Instruction *I) {
3627   switch (I->getOpcode()) {
3628     case Instruction::Add:
3629     case Instruction::Sub:
3630     case Instruction::Xor:
3631     case Instruction::Trunc:
3632     case Instruction::BitCast:
3633     case Instruction::AddrSpaceCast:
3634       // These operations all propagate poison unconditionally. Note that poison
3635       // is not any particular value, so xor or subtraction of poison with
3636       // itself still yields poison, not zero.
3637       return true;
3638 
3639     case Instruction::AShr:
3640     case Instruction::SExt:
3641       // For these operations, one bit of the input is replicated across
3642       // multiple output bits. A replicated poison bit is still poison.
3643       return true;
3644 
3645     case Instruction::Shl: {
3646       // Left shift *by* a poison value is poison. The number of
3647       // positions to shift is unsigned, so no negative values are
3648       // possible there. Left shift by zero places preserves poison. So
3649       // it only remains to consider left shift of poison by a positive
3650       // number of places.
3651       //
3652       // A left shift by a positive number of places leaves the lowest order bit
3653       // non-poisoned. However, if such a shift has a no-wrap flag, then we can
3654       // make the poison operand violate that flag, yielding a fresh full-poison
3655       // value.
3656       auto *OBO = cast<OverflowingBinaryOperator>(I);
3657       return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
3658     }
3659 
3660     case Instruction::Mul: {
3661       // A multiplication by zero yields a non-poison zero result, so we need to
3662       // rule out zero as an operand. Conservatively, multiplication by a
3663       // non-zero constant is not multiplication by zero.
3664       //
3665       // Multiplication by a non-zero constant can leave some bits
3666       // non-poisoned. For example, a multiplication by 2 leaves the lowest
3667       // order bit unpoisoned. So we need to consider that.
3668       //
3669       // Multiplication by 1 preserves poison. If the multiplication has a
3670       // no-wrap flag, then we can make the poison operand violate that flag
3671       // when multiplied by any integer other than 0 and 1.
3672       auto *OBO = cast<OverflowingBinaryOperator>(I);
3673       if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
3674         for (Value *V : OBO->operands()) {
3675           if (auto *CI = dyn_cast<ConstantInt>(V)) {
3676             // A ConstantInt cannot yield poison, so we can assume that it is
3677             // the other operand that is poison.
3678             return !CI->isZero();
3679           }
3680         }
3681       }
3682       return false;
3683     }
3684 
3685     case Instruction::ICmp:
3686       // Comparing poison with any value yields poison.  This is why, for
3687       // instance, x s< (x +nsw 1) can be folded to true.
3688       return true;
3689 
3690     case Instruction::GetElementPtr:
3691       // A GEP implicitly represents a sequence of additions, subtractions,
3692       // truncations, sign extensions and multiplications. The multiplications
3693       // are by the non-zero sizes of some set of types, so we do not have to be
3694       // concerned with multiplication by zero. If the GEP is in-bounds, then
3695       // these operations are implicitly no-signed-wrap so poison is propagated
3696       // by the arguments above for Add, Sub, Trunc, SExt and Mul.
3697       return cast<GEPOperator>(I)->isInBounds();
3698 
3699     default:
3700       return false;
3701   }
3702 }
3703 
3704 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3705   switch (I->getOpcode()) {
3706     case Instruction::Store:
3707       return cast<StoreInst>(I)->getPointerOperand();
3708 
3709     case Instruction::Load:
3710       return cast<LoadInst>(I)->getPointerOperand();
3711 
3712     case Instruction::AtomicCmpXchg:
3713       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3714 
3715     case Instruction::AtomicRMW:
3716       return cast<AtomicRMWInst>(I)->getPointerOperand();
3717 
3718     case Instruction::UDiv:
3719     case Instruction::SDiv:
3720     case Instruction::URem:
3721     case Instruction::SRem:
3722       return I->getOperand(1);
3723 
3724     default:
3725       return nullptr;
3726   }
3727 }
3728 
3729 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
3730   // We currently only look for uses of poison values within the same basic
3731   // block, as that makes it easier to guarantee that the uses will be
3732   // executed given that PoisonI is executed.
3733   //
3734   // FIXME: Expand this to consider uses beyond the same basic block. To do
3735   // this, look out for the distinction between post-dominance and strong
3736   // post-dominance.
3737   const BasicBlock *BB = PoisonI->getParent();
3738 
3739   // Set of instructions that we have proved will yield poison if PoisonI
3740   // does.
3741   SmallSet<const Value *, 16> YieldsPoison;
3742   SmallSet<const BasicBlock *, 4> Visited;
3743   YieldsPoison.insert(PoisonI);
3744   Visited.insert(PoisonI->getParent());
3745 
3746   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
3747 
3748   unsigned Iter = 0;
3749   while (Iter++ < MaxDepth) {
3750     for (auto &I : make_range(Begin, End)) {
3751       if (&I != PoisonI) {
3752         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
3753         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
3754           return true;
3755         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
3756           return false;
3757       }
3758 
3759       // Mark poison that propagates from I through uses of I.
3760       if (YieldsPoison.count(&I)) {
3761         for (const User *User : I.users()) {
3762           const Instruction *UserI = cast<Instruction>(User);
3763           if (propagatesFullPoison(UserI))
3764             YieldsPoison.insert(User);
3765         }
3766       }
3767     }
3768 
3769     if (auto *NextBB = BB->getSingleSuccessor()) {
3770       if (Visited.insert(NextBB).second) {
3771         BB = NextBB;
3772         Begin = BB->getFirstNonPHI()->getIterator();
3773         End = BB->end();
3774         continue;
3775       }
3776     }
3777 
3778     break;
3779   };
3780   return false;
3781 }
3782 
3783 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
3784   if (FMF.noNaNs())
3785     return true;
3786 
3787   if (auto *C = dyn_cast<ConstantFP>(V))
3788     return !C->isNaN();
3789   return false;
3790 }
3791 
3792 static bool isKnownNonZero(const Value *V) {
3793   if (auto *C = dyn_cast<ConstantFP>(V))
3794     return !C->isZero();
3795   return false;
3796 }
3797 
3798 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
3799                                               FastMathFlags FMF,
3800                                               Value *CmpLHS, Value *CmpRHS,
3801                                               Value *TrueVal, Value *FalseVal,
3802                                               Value *&LHS, Value *&RHS) {
3803   LHS = CmpLHS;
3804   RHS = CmpRHS;
3805 
3806   // If the predicate is an "or-equal"  (FP) predicate, then signed zeroes may
3807   // return inconsistent results between implementations.
3808   //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
3809   //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
3810   // Therefore we behave conservatively and only proceed if at least one of the
3811   // operands is known to not be zero, or if we don't care about signed zeroes.
3812   switch (Pred) {
3813   default: break;
3814   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
3815   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
3816     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
3817         !isKnownNonZero(CmpRHS))
3818       return {SPF_UNKNOWN, SPNB_NA, false};
3819   }
3820 
3821   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
3822   bool Ordered = false;
3823 
3824   // When given one NaN and one non-NaN input:
3825   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
3826   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
3827   //     ordered comparison fails), which could be NaN or non-NaN.
3828   // so here we discover exactly what NaN behavior is required/accepted.
3829   if (CmpInst::isFPPredicate(Pred)) {
3830     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
3831     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
3832 
3833     if (LHSSafe && RHSSafe) {
3834       // Both operands are known non-NaN.
3835       NaNBehavior = SPNB_RETURNS_ANY;
3836     } else if (CmpInst::isOrdered(Pred)) {
3837       // An ordered comparison will return false when given a NaN, so it
3838       // returns the RHS.
3839       Ordered = true;
3840       if (LHSSafe)
3841         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
3842         NaNBehavior = SPNB_RETURNS_NAN;
3843       else if (RHSSafe)
3844         NaNBehavior = SPNB_RETURNS_OTHER;
3845       else
3846         // Completely unsafe.
3847         return {SPF_UNKNOWN, SPNB_NA, false};
3848     } else {
3849       Ordered = false;
3850       // An unordered comparison will return true when given a NaN, so it
3851       // returns the LHS.
3852       if (LHSSafe)
3853         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
3854         NaNBehavior = SPNB_RETURNS_OTHER;
3855       else if (RHSSafe)
3856         NaNBehavior = SPNB_RETURNS_NAN;
3857       else
3858         // Completely unsafe.
3859         return {SPF_UNKNOWN, SPNB_NA, false};
3860     }
3861   }
3862 
3863   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
3864     std::swap(CmpLHS, CmpRHS);
3865     Pred = CmpInst::getSwappedPredicate(Pred);
3866     if (NaNBehavior == SPNB_RETURNS_NAN)
3867       NaNBehavior = SPNB_RETURNS_OTHER;
3868     else if (NaNBehavior == SPNB_RETURNS_OTHER)
3869       NaNBehavior = SPNB_RETURNS_NAN;
3870     Ordered = !Ordered;
3871   }
3872 
3873   // ([if]cmp X, Y) ? X : Y
3874   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
3875     switch (Pred) {
3876     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
3877     case ICmpInst::ICMP_UGT:
3878     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
3879     case ICmpInst::ICMP_SGT:
3880     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
3881     case ICmpInst::ICMP_ULT:
3882     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
3883     case ICmpInst::ICMP_SLT:
3884     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
3885     case FCmpInst::FCMP_UGT:
3886     case FCmpInst::FCMP_UGE:
3887     case FCmpInst::FCMP_OGT:
3888     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
3889     case FCmpInst::FCMP_ULT:
3890     case FCmpInst::FCMP_ULE:
3891     case FCmpInst::FCMP_OLT:
3892     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
3893     }
3894   }
3895 
3896   if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
3897     if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
3898         (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
3899 
3900       // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
3901       // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
3902       if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
3903         return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3904       }
3905 
3906       // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
3907       // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
3908       if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
3909         return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3910       }
3911     }
3912 
3913     // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C)
3914     if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) {
3915       if (Pred == ICmpInst::ICMP_SGT && C1->getType() == C2->getType() &&
3916           ~C1->getValue() == C2->getValue() &&
3917           (match(TrueVal, m_Not(m_Specific(CmpLHS))) ||
3918            match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
3919         LHS = TrueVal;
3920         RHS = FalseVal;
3921         return {SPF_SMIN, SPNB_NA, false};
3922       }
3923     }
3924   }
3925 
3926   // TODO: (X > 4) ? X : 5   -->  (X >= 5) ? X : 5  -->  MAX(X, 5)
3927 
3928   return {SPF_UNKNOWN, SPNB_NA, false};
3929 }
3930 
3931 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
3932                               Instruction::CastOps *CastOp) {
3933   CastInst *CI = dyn_cast<CastInst>(V1);
3934   Constant *C = dyn_cast<Constant>(V2);
3935   if (!CI)
3936     return nullptr;
3937   *CastOp = CI->getOpcode();
3938 
3939   if (auto *CI2 = dyn_cast<CastInst>(V2)) {
3940     // If V1 and V2 are both the same cast from the same type, we can look
3941     // through V1.
3942     if (CI2->getOpcode() == CI->getOpcode() &&
3943         CI2->getSrcTy() == CI->getSrcTy())
3944       return CI2->getOperand(0);
3945     return nullptr;
3946   } else if (!C) {
3947     return nullptr;
3948   }
3949 
3950   Constant *CastedTo = nullptr;
3951 
3952   if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
3953     CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy());
3954 
3955   if (isa<SExtInst>(CI) && CmpI->isSigned())
3956     CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy(), true);
3957 
3958   if (isa<TruncInst>(CI))
3959     CastedTo = ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
3960 
3961   if (isa<FPTruncInst>(CI))
3962     CastedTo = ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);
3963 
3964   if (isa<FPExtInst>(CI))
3965     CastedTo = ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);
3966 
3967   if (isa<FPToUIInst>(CI))
3968     CastedTo = ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);
3969 
3970   if (isa<FPToSIInst>(CI))
3971     CastedTo = ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);
3972 
3973   if (isa<UIToFPInst>(CI))
3974     CastedTo = ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);
3975 
3976   if (isa<SIToFPInst>(CI))
3977     CastedTo = ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);
3978 
3979   if (!CastedTo)
3980     return nullptr;
3981 
3982   Constant *CastedBack =
3983       ConstantExpr::getCast(CI->getOpcode(), CastedTo, C->getType(), true);
3984   // Make sure the cast doesn't lose any information.
3985   if (CastedBack != C)
3986     return nullptr;
3987 
3988   return CastedTo;
3989 }
3990 
3991 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
3992                                              Instruction::CastOps *CastOp) {
3993   SelectInst *SI = dyn_cast<SelectInst>(V);
3994   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
3995 
3996   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
3997   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
3998 
3999   CmpInst::Predicate Pred = CmpI->getPredicate();
4000   Value *CmpLHS = CmpI->getOperand(0);
4001   Value *CmpRHS = CmpI->getOperand(1);
4002   Value *TrueVal = SI->getTrueValue();
4003   Value *FalseVal = SI->getFalseValue();
4004   FastMathFlags FMF;
4005   if (isa<FPMathOperator>(CmpI))
4006     FMF = CmpI->getFastMathFlags();
4007 
4008   // Bail out early.
4009   if (CmpI->isEquality())
4010     return {SPF_UNKNOWN, SPNB_NA, false};
4011 
4012   // Deal with type mismatches.
4013   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4014     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
4015       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4016                                   cast<CastInst>(TrueVal)->getOperand(0), C,
4017                                   LHS, RHS);
4018     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
4019       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4020                                   C, cast<CastInst>(FalseVal)->getOperand(0),
4021                                   LHS, RHS);
4022   }
4023   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4024                               LHS, RHS);
4025 }
4026 
4027 ConstantRange llvm::getConstantRangeFromMetadata(const MDNode &Ranges) {
4028   const unsigned NumRanges = Ranges.getNumOperands() / 2;
4029   assert(NumRanges >= 1 && "Must have at least one range!");
4030   assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
4031 
4032   auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0));
4033   auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1));
4034 
4035   ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue());
4036 
4037   for (unsigned i = 1; i < NumRanges; ++i) {
4038     auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
4039     auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
4040 
4041     // Note: unionWith will potentially create a range that contains values not
4042     // contained in any of the original N ranges.
4043     CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue()));
4044   }
4045 
4046   return CR;
4047 }
4048 
4049 /// Return true if "icmp Pred LHS RHS" is always true.
4050 static bool isTruePredicate(CmpInst::Predicate Pred,
4051                             const Value *LHS, const Value *RHS,
4052                             const DataLayout &DL, unsigned Depth,
4053                             AssumptionCache *AC, const Instruction *CxtI,
4054                             const DominatorTree *DT) {
4055   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4056   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4057     return true;
4058 
4059   switch (Pred) {
4060   default:
4061     return false;
4062 
4063   case CmpInst::ICMP_SLE: {
4064     const APInt *C;
4065 
4066     // LHS s<= LHS +_{nsw} C   if C >= 0
4067     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4068       return !C->isNegative();
4069     return false;
4070   }
4071 
4072   case CmpInst::ICMP_ULE: {
4073     const APInt *C;
4074 
4075     // LHS u<= LHS +_{nuw} C   for any C
4076     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4077       return true;
4078 
4079     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4080     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4081                                        const Value *&X,
4082                                        const APInt *&CA, const APInt *&CB) {
4083       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4084           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4085         return true;
4086 
4087       // If X & C == 0 then (X | C) == X +_{nuw} C
4088       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4089           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4090         unsigned BitWidth = CA->getBitWidth();
4091         APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
4092         computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
4093 
4094         if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
4095           return true;
4096       }
4097 
4098       return false;
4099     };
4100 
4101     const Value *X;
4102     const APInt *CLHS, *CRHS;
4103     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4104       return CLHS->ule(*CRHS);
4105 
4106     return false;
4107   }
4108   }
4109 }
4110 
4111 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4112 /// ALHS ARHS" is true.  Otherwise, return None.
4113 static Optional<bool>
4114 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4115                       const Value *ARHS, const Value *BLHS,
4116                       const Value *BRHS, const DataLayout &DL,
4117                       unsigned Depth, AssumptionCache *AC,
4118                       const Instruction *CxtI, const DominatorTree *DT) {
4119   switch (Pred) {
4120   default:
4121     return None;
4122 
4123   case CmpInst::ICMP_SLT:
4124   case CmpInst::ICMP_SLE:
4125     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
4126                         DT) &&
4127         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4128       return true;
4129     return None;
4130 
4131   case CmpInst::ICMP_ULT:
4132   case CmpInst::ICMP_ULE:
4133     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
4134                         DT) &&
4135         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4136       return true;
4137     return None;
4138   }
4139 }
4140 
4141 /// Return true if the operands of the two compares match.  IsSwappedOps is true
4142 /// when the operands match, but are swapped.
4143 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4144                           const Value *BLHS, const Value *BRHS,
4145                           bool &IsSwappedOps) {
4146 
4147   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4148   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4149   return IsMatchingOps || IsSwappedOps;
4150 }
4151 
4152 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4153 /// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4154 /// BRHS" is false.  Otherwise, return None if we can't infer anything.
4155 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4156                                                     const Value *ALHS,
4157                                                     const Value *ARHS,
4158                                                     CmpInst::Predicate BPred,
4159                                                     const Value *BLHS,
4160                                                     const Value *BRHS,
4161                                                     bool IsSwappedOps) {
4162   // Canonicalize the operands so they're matching.
4163   if (IsSwappedOps) {
4164     std::swap(BLHS, BRHS);
4165     BPred = ICmpInst::getSwappedPredicate(BPred);
4166   }
4167   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4168     return true;
4169   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4170     return false;
4171 
4172   return None;
4173 }
4174 
4175 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4176 /// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4177 /// C2" is false.  Otherwise, return None if we can't infer anything.
4178 static Optional<bool>
4179 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4180                                  const ConstantInt *C1,
4181                                  CmpInst::Predicate BPred,
4182                                  const Value *BLHS, const ConstantInt *C2) {
4183   assert(ALHS == BLHS && "LHS operands must match.");
4184   ConstantRange DomCR =
4185       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4186   ConstantRange CR =
4187       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4188   ConstantRange Intersection = DomCR.intersectWith(CR);
4189   ConstantRange Difference = DomCR.difference(CR);
4190   if (Intersection.isEmptySet())
4191     return false;
4192   if (Difference.isEmptySet())
4193     return true;
4194   return None;
4195 }
4196 
4197 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4198                                         const DataLayout &DL, bool InvertAPred,
4199                                         unsigned Depth, AssumptionCache *AC,
4200                                         const Instruction *CxtI,
4201                                         const DominatorTree *DT) {
4202   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
4203   if (LHS->getType() != RHS->getType())
4204     return None;
4205 
4206   Type *OpTy = LHS->getType();
4207   assert(OpTy->getScalarType()->isIntegerTy(1));
4208 
4209   // LHS ==> RHS by definition
4210   if (!InvertAPred && LHS == RHS)
4211     return true;
4212 
4213   if (OpTy->isVectorTy())
4214     // TODO: extending the code below to handle vectors
4215     return None;
4216   assert(OpTy->isIntegerTy(1) && "implied by above");
4217 
4218   ICmpInst::Predicate APred, BPred;
4219   Value *ALHS, *ARHS;
4220   Value *BLHS, *BRHS;
4221 
4222   if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
4223       !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
4224     return None;
4225 
4226   if (InvertAPred)
4227     APred = CmpInst::getInversePredicate(APred);
4228 
4229   // Can we infer anything when the two compares have matching operands?
4230   bool IsSwappedOps;
4231   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4232     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4233             APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4234       return Implication;
4235     // No amount of additional analysis will infer the second condition, so
4236     // early exit.
4237     return None;
4238   }
4239 
4240   // Can we infer anything when the LHS operands match and the RHS operands are
4241   // constants (not necessarily matching)?
4242   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4243     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4244             APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4245             cast<ConstantInt>(BRHS)))
4246       return Implication;
4247     // No amount of additional analysis will infer the second condition, so
4248     // early exit.
4249     return None;
4250   }
4251 
4252   if (APred == BPred)
4253     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
4254                                  CxtI, DT);
4255 
4256   return None;
4257 }
4258