1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/Loads.h"
21 #include "llvm/Analysis/LoopInfo.h"
22 #include "llvm/Analysis/MemoryBuiltins.h"
23 #include "llvm/Analysis/OptimizationDiagnosticInfo.h"
24 #include "llvm/Analysis/VectorUtils.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/ConstantRange.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/GetElementPtrTypeIterator.h"
32 #include "llvm/IR/GlobalAlias.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/LLVMContext.h"
37 #include "llvm/IR/Metadata.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/IR/PatternMatch.h"
40 #include "llvm/IR/Statepoint.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/KnownBits.h"
43 #include "llvm/Support/MathExtras.h"
44 #include <algorithm>
45 #include <array>
46 #include <cstring>
47 using namespace llvm;
48 using namespace llvm::PatternMatch;
49 
50 const unsigned MaxDepth = 6;
51 
52 // Controls the number of uses of the value searched for possible
53 // dominating comparisons.
54 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
55                                               cl::Hidden, cl::init(20));
56 
// This optimization is known to cause performance regressions in some cases,
// so keep it under a temporary flag for now.
59 static cl::opt<bool>
60 DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
61                               cl::Hidden, cl::init(true));
62 
63 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
64 /// returns the element type's bitwidth.
65 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
66   if (unsigned BitWidth = Ty->getScalarSizeInBits())
67     return BitWidth;
68 
69   return DL.getPointerTypeSizeInBits(Ty);
70 }
71 
72 namespace {
73 // Simplifying using an assume can only be done in a particular control-flow
74 // context (the context instruction provides that context). If an assume and
75 // the context instruction are not in the same block then the DT helps in
76 // figuring out if we can use it.
77 struct Query {
78   const DataLayout &DL;
79   AssumptionCache *AC;
80   const Instruction *CxtI;
81   const DominatorTree *DT;
82   // Unlike the other analyses, this may be a nullptr because not all clients
83   // provide it currently.
84   OptimizationRemarkEmitter *ORE;
85 
86   /// Set of assumptions that should be excluded from further queries.
87   /// This is because of the potential for mutual recursion to cause
88   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
89   /// classic case of this is assume(x = y), which will attempt to determine
90   /// bits in x from bits in y, which will attempt to determine bits in y from
91   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
92   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
93   /// (all of which can call computeKnownBits), and so on.
94   std::array<const Value *, MaxDepth> Excluded;
95   unsigned NumExcluded;
96 
97   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
98         const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
99       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), NumExcluded(0) {}
100 
101   Query(const Query &Q, const Value *NewExcl)
102       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
103         NumExcluded(Q.NumExcluded) {
104     Excluded = Q.Excluded;
105     Excluded[NumExcluded++] = NewExcl;
106     assert(NumExcluded <= Excluded.size());
107   }
108 
109   bool isExcluded(const Value *Value) const {
110     if (NumExcluded == 0)
111       return false;
112     auto End = Excluded.begin() + NumExcluded;
113     return std::find(Excluded.begin(), End, Value) != End;
114   }
115 };
116 } // end anonymous namespace
117 
118 // Given the provided Value and, potentially, a context instruction, return
119 // the preferred context instruction (if any).
120 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
121   // If we've been provided with a context instruction, then use that (provided
122   // it has been inserted).
123   if (CxtI && CxtI->getParent())
124     return CxtI;
125 
126   // If the value is really an already-inserted instruction, then use that.
127   CxtI = dyn_cast<Instruction>(V);
128   if (CxtI && CxtI->getParent())
129     return CxtI;
130 
131   return nullptr;
132 }
133 
134 static void computeKnownBits(const Value *V, KnownBits &Known,
135                              unsigned Depth, const Query &Q);
136 
137 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
138                             const DataLayout &DL, unsigned Depth,
139                             AssumptionCache *AC, const Instruction *CxtI,
140                             const DominatorTree *DT,
141                             OptimizationRemarkEmitter *ORE) {
142   ::computeKnownBits(V, Known, Depth,
143                      Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
144 }
145 
146 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
147                                   const Query &Q);
148 
149 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
150                                  unsigned Depth, AssumptionCache *AC,
151                                  const Instruction *CxtI,
152                                  const DominatorTree *DT,
153                                  OptimizationRemarkEmitter *ORE) {
154   return ::computeKnownBits(V, Depth,
155                             Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
156 }
157 
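/// Return true if LHS and RHS are known to have no bits set in common, i.e.
/// for every bit position at least one of the two operands has that bit known
/// to be zero (so, for example, LHS + RHS == LHS | RHS).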
158 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
159                                const DataLayout &DL,
160                                AssumptionCache *AC, const Instruction *CxtI,
161                                const DominatorTree *DT) {
162   assert(LHS->getType() == RHS->getType() &&
163          "LHS and RHS should have the same type");
164   assert(LHS->getType()->isIntOrIntVectorTy() &&
165          "LHS and RHS should be integers");
166   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
167   KnownBits LHSKnown(IT->getBitWidth());
168   KnownBits RHSKnown(IT->getBitWidth());
169   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
170   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
171   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
172 }
173 
174 
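/// Return true if every user of CxtI is an equality comparison (eq or ne)
/// against the constant zero.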
175 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
176   for (const User *U : CxtI->users()) {
177     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
178       if (IC->isEquality())
179         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
180           if (C->isNullValue())
181             continue;
182     return false;
183   }
184   return true;
185 }
186 
187 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
188                                    const Query &Q);
189 
190 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
191                                   bool OrZero,
192                                   unsigned Depth, AssumptionCache *AC,
193                                   const Instruction *CxtI,
194                                   const DominatorTree *DT) {
195   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
196                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
197 }
198 
199 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
200 
201 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
202                           AssumptionCache *AC, const Instruction *CxtI,
203                           const DominatorTree *DT) {
204   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
205 }
206 
207 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
208                               unsigned Depth,
209                               AssumptionCache *AC, const Instruction *CxtI,
210                               const DominatorTree *DT) {
211   KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
212   return Known.isNonNegative();
213 }
214 
215 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
216                            AssumptionCache *AC, const Instruction *CxtI,
217                            const DominatorTree *DT) {
218   if (auto *CI = dyn_cast<ConstantInt>(V))
219     return CI->getValue().isStrictlyPositive();
220 
  // TODO: We're doing two recursive queries here.  We should factor this
  // such that only a single query is needed.
223   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
224     isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
225 }
226 
227 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
228                            AssumptionCache *AC, const Instruction *CxtI,
229                            const DominatorTree *DT) {
230   KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
231   return Known.isNegative();
232 }
233 
234 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
235 
236 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
237                            const DataLayout &DL,
238                            AssumptionCache *AC, const Instruction *CxtI,
239                            const DominatorTree *DT) {
240   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
241                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
242                                          DT));
243 }
244 
245 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
246                               const Query &Q);
247 
248 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
249                              const DataLayout &DL,
250                              unsigned Depth, AssumptionCache *AC,
251                              const Instruction *CxtI, const DominatorTree *DT) {
252   return ::MaskedValueIsZero(V, Mask, Depth,
253                              Query(DL, AC, safeCxtI(V, CxtI), DT));
254 }
255 
256 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
257                                    const Query &Q);
258 
259 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
260                                   unsigned Depth, AssumptionCache *AC,
261                                   const Instruction *CxtI,
262                                   const DominatorTree *DT) {
263   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
264 }
265 
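/// Compute known bits for an add (Add == true) or subtract. A subtract is
/// modelled as LHS + ~RHS + 1; the known bits of the result are derived by
/// propagating a bitwise carry, and the nsw flag is used to refine the sign
/// bit where possible.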
266 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
267                                    bool NSW,
268                                    KnownBits &KnownOut, KnownBits &Known2,
269                                    unsigned Depth, const Query &Q) {
270   unsigned BitWidth = KnownOut.getBitWidth();
271 
272   // If an initial sequence of bits in the result is not needed, the
273   // corresponding bits in the operands are not needed.
274   KnownBits LHSKnown(BitWidth);
275   computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
276   computeKnownBits(Op1, Known2, Depth + 1, Q);
277 
278   // Carry in a 1 for a subtract, rather than a 0.
279   uint64_t CarryIn = 0;
280   if (!Add) {
281     // Sum = LHS + ~RHS + 1
282     std::swap(Known2.Zero, Known2.One);
283     CarryIn = 1;
284   }
285 
286   APInt PossibleSumZero = ~LHSKnown.Zero + ~Known2.Zero + CarryIn;
287   APInt PossibleSumOne = LHSKnown.One + Known2.One + CarryIn;
288 
289   // Compute known bits of the carry.
290   APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnown.Zero ^ Known2.Zero);
291   APInt CarryKnownOne = PossibleSumOne ^ LHSKnown.One ^ Known2.One;
292 
293   // Compute set of known bits (where all three relevant bits are known).
294   APInt LHSKnownUnion = LHSKnown.Zero | LHSKnown.One;
295   APInt RHSKnownUnion = Known2.Zero | Known2.One;
296   APInt CarryKnownUnion = CarryKnownZero | CarryKnownOne;
297   APInt Known = LHSKnownUnion & RHSKnownUnion & CarryKnownUnion;
298 
299   assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
300          "known bits of sum differ");
301 
302   // Compute known bits of the result.
303   KnownOut.Zero = ~PossibleSumOne & Known;
304   KnownOut.One = PossibleSumOne & Known;
305 
306   // Are we still trying to solve for the sign bit?
307   if (!Known.isSignBitSet()) {
308     if (NSW) {
309       // Adding two non-negative numbers, or subtracting a negative number from
310       // a non-negative one, can't wrap into negative.
311       if (LHSKnown.isNonNegative() && Known2.isNonNegative())
312         KnownOut.makeNonNegative();
313       // Adding two negative numbers, or subtracting a non-negative number from
314       // a negative one, can't wrap into non-negative.
315       else if (LHSKnown.isNegative() && Known2.isNegative())
316         KnownOut.makeNegative();
317     }
318   }
319 }
320 
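/// Compute known bits for a multiply. Trailing zero counts of the operands
/// add up in the product, a conservative bound on leading zeros is derived,
/// and the nsw flag is used to determine the sign bit where possible.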
321 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
322                                 KnownBits &Known, KnownBits &Known2,
323                                 unsigned Depth, const Query &Q) {
324   unsigned BitWidth = Known.getBitWidth();
325   computeKnownBits(Op1, Known, Depth + 1, Q);
326   computeKnownBits(Op0, Known2, Depth + 1, Q);
327 
328   bool isKnownNegative = false;
329   bool isKnownNonNegative = false;
330   // If the multiplication is known not to overflow, compute the sign bit.
331   if (NSW) {
332     if (Op0 == Op1) {
333       // The product of a number with itself is non-negative.
334       isKnownNonNegative = true;
335     } else {
336       bool isKnownNonNegativeOp1 = Known.isNonNegative();
337       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
338       bool isKnownNegativeOp1 = Known.isNegative();
339       bool isKnownNegativeOp0 = Known2.isNegative();
340       // The product of two numbers with the same sign is non-negative.
341       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
342         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
343       // The product of a negative number and a non-negative number is either
344       // negative or zero.
345       if (!isKnownNonNegative)
346         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
347                            isKnownNonZero(Op0, Depth, Q)) ||
348                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
349                            isKnownNonZero(Op1, Depth, Q));
350     }
351   }
352 
353   // If low bits are zero in either operand, output low known-0 bits.
354   // Also compute a conservative estimate for high known-0 bits.
355   // More trickiness is possible, but this is sufficient for the
356   // interesting case of alignment computation.
357   unsigned TrailZ = Known.countMinTrailingZeros() +
358                     Known2.countMinTrailingZeros();
359   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
360                              Known2.countMinLeadingZeros(),
361                              BitWidth) - BitWidth;
362 
363   TrailZ = std::min(TrailZ, BitWidth);
364   LeadZ = std::min(LeadZ, BitWidth);
365   Known.resetAll();
366   Known.Zero.setLowBits(TrailZ);
367   Known.Zero.setHighBits(LeadZ);
368 
369   // Only make use of no-wrap flags if we failed to compute the sign bit
370   // directly.  This matters if the multiplication always overflows, in
371   // which case we prefer to follow the result of the direct computation,
372   // though as the program is invoking undefined behaviour we can choose
373   // whatever we like here.
374   if (isKnownNonNegative && !Known.isNegative())
375     Known.makeNonNegative();
376   else if (isKnownNegative && !Known.isNonNegative())
377     Known.makeNegative();
378 }
379 
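/// Refine the known bits using !range metadata. All values within a single
/// range share a common prefix of high bits; only the prefix bits that agree
/// across every listed range remain known.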
380 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
381                                              KnownBits &Known) {
382   unsigned BitWidth = Known.getBitWidth();
383   unsigned NumRanges = Ranges.getNumOperands() / 2;
384   assert(NumRanges >= 1);
385 
386   Known.Zero.setAllBits();
387   Known.One.setAllBits();
388 
389   for (unsigned i = 0; i < NumRanges; ++i) {
390     ConstantInt *Lower =
391         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
392     ConstantInt *Upper =
393         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
394     ConstantRange Range(Lower->getValue(), Upper->getValue());
395 
396     // The first CommonPrefixBits of all values in Range are equal.
397     unsigned CommonPrefixBits =
398         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
399 
400     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
401     Known.One &= Range.getUnsignedMax() & Mask;
402     Known.Zero &= ~Range.getUnsignedMax() & Mask;
403   }
404 }
405 
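/// Return true if E is an "ephemeral" value of the assume-like instruction I:
/// E is used (directly or transitively) only to compute I's condition, so it
/// serves no other purpose in the program.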
406 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
407   SmallVector<const Value *, 16> WorkSet(1, I);
408   SmallPtrSet<const Value *, 32> Visited;
409   SmallPtrSet<const Value *, 16> EphValues;
410 
411   // The instruction defining an assumption's condition itself is always
412   // considered ephemeral to that assumption (even if it has other
413   // non-ephemeral users). See r246696's test case for an example.
414   if (is_contained(I->operands(), E))
415     return true;
416 
417   while (!WorkSet.empty()) {
418     const Value *V = WorkSet.pop_back_val();
419     if (!Visited.insert(V).second)
420       continue;
421 
422     // If all uses of this value are ephemeral, then so is this value.
423     if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
424       if (V == E)
425         return true;
426 
427       EphValues.insert(V);
428       if (const User *U = dyn_cast<User>(V))
429         for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
430              J != JE; ++J) {
431           if (isSafeToSpeculativelyExecute(*J))
432             WorkSet.push_back(*J);
433         }
434     }
435   }
436 
437   return false;
438 }
439 
440 // Is this an intrinsic that cannot be speculated but also cannot trap?
441 static bool isAssumeLikeIntrinsic(const Instruction *I) {
442   if (const CallInst *CI = dyn_cast<CallInst>(I))
443     if (Function *F = CI->getCalledFunction())
444       switch (F->getIntrinsicID()) {
445       default: break;
446       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
447       case Intrinsic::assume:
448       case Intrinsic::dbg_declare:
449       case Intrinsic::dbg_value:
450       case Intrinsic::invariant_start:
451       case Intrinsic::invariant_end:
452       case Intrinsic::lifetime_start:
453       case Intrinsic::lifetime_end:
454       case Intrinsic::objectsize:
455       case Intrinsic::ptr_annotation:
456       case Intrinsic::var_annotation:
457         return true;
458       }
459 
460   return false;
461 }
462 
463 bool llvm::isValidAssumeForContext(const Instruction *Inv,
464                                    const Instruction *CxtI,
465                                    const DominatorTree *DT) {
466 
467   // There are two restrictions on the use of an assume:
468   //  1. The assume must dominate the context (or the control flow must
469   //     reach the assume whenever it reaches the context).
470   //  2. The context must not be in the assume's set of ephemeral values
471   //     (otherwise we will use the assume to prove that the condition
472   //     feeding the assume is trivially true, thus causing the removal of
473   //     the assume).
474 
475   if (DT) {
476     if (DT->dominates(Inv, CxtI))
477       return true;
478   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
479     // We don't have a DT, but this trivially dominates.
480     return true;
481   }
482 
483   // With or without a DT, the only remaining case we will check is if the
484   // instructions are in the same BB.  Give up if that is not the case.
485   if (Inv->getParent() != CxtI->getParent())
486     return false;
487 
  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction.  If we don't have a dom tree then we can check if
  // the assume is first in the BB.
491   if (!DT) {
492     // Search forward from the assume until we reach the context (or the end
493     // of the block); the common case is that the assume will come first.
494     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
495          IE = Inv->getParent()->end(); I != IE; ++I)
496       if (&*I == CxtI)
497         return true;
498   }
499 
500   // The context comes first, but they're both in the same block. Make sure
501   // there is nothing in between that might interrupt the control flow.
502   for (BasicBlock::const_iterator I =
503          std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
504        I != IE; ++I)
505     if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
506       return false;
507 
508   return !isEphemeralValueOf(Inv, CxtI);
509 }
510 
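/// Try to improve the known bits of V using llvm.assume calls recorded in the
/// assumption cache. Only assumptions that are valid at the query's context
/// instruction (per isValidAssumeForContext) are used, and a fixed set of
/// comparison patterns on V is recognized.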
511 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
512                                        unsigned Depth, const Query &Q) {
513   // Use of assumptions is context-sensitive. If we don't have a context, we
514   // cannot use them!
515   if (!Q.AC || !Q.CxtI)
516     return;
517 
518   unsigned BitWidth = Known.getBitWidth();
519 
520   // Note that the patterns below need to be kept in sync with the code
521   // in AssumptionCache::updateAffectedValues.
522 
523   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
524     if (!AssumeVH)
525       continue;
526     CallInst *I = cast<CallInst>(AssumeVH);
527     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
528            "Got assumption for the wrong function!");
529     if (Q.isExcluded(I))
530       continue;
531 
    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).
535 
536     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
537            "must be an assume intrinsic");
538 
539     Value *Arg = I->getArgOperand(0);
540 
541     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
542       assert(BitWidth == 1 && "assume operand is not i1?");
543       Known.setAllOnes();
544       return;
545     }
546     if (match(Arg, m_Not(m_Specific(V))) &&
547         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
548       assert(BitWidth == 1 && "assume operand is not i1?");
549       Known.setAllZero();
550       return;
551     }
552 
553     // The remaining tests are all recursive, so bail out if we hit the limit.
554     if (Depth == MaxDepth)
555       continue;
556 
557     Value *A, *B;
558     auto m_V = m_CombineOr(m_Specific(V),
559                            m_CombineOr(m_PtrToInt(m_Specific(V)),
560                            m_BitCast(m_Specific(V))));
561 
562     CmpInst::Predicate Pred;
563     ConstantInt *C;
564     // assume(v = a)
565     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
566         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
567       KnownBits RHSKnown(BitWidth);
568       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
569       Known.Zero |= RHSKnown.Zero;
570       Known.One  |= RHSKnown.One;
571     // assume(v & b = a)
572     } else if (match(Arg,
573                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
574                Pred == ICmpInst::ICMP_EQ &&
575                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
576       KnownBits RHSKnown(BitWidth);
577       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
578       KnownBits MaskKnown(BitWidth);
579       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
580 
581       // For those bits in the mask that are known to be one, we can propagate
582       // known bits from the RHS to V.
583       Known.Zero |= RHSKnown.Zero & MaskKnown.One;
584       Known.One  |= RHSKnown.One  & MaskKnown.One;
585     // assume(~(v & b) = a)
586     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
587                                    m_Value(A))) &&
588                Pred == ICmpInst::ICMP_EQ &&
589                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
590       KnownBits RHSKnown(BitWidth);
591       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
592       KnownBits MaskKnown(BitWidth);
593       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
594 
595       // For those bits in the mask that are known to be one, we can propagate
596       // inverted known bits from the RHS to V.
597       Known.Zero |= RHSKnown.One  & MaskKnown.One;
598       Known.One  |= RHSKnown.Zero & MaskKnown.One;
599     // assume(v | b = a)
600     } else if (match(Arg,
601                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
602                Pred == ICmpInst::ICMP_EQ &&
603                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
604       KnownBits RHSKnown(BitWidth);
605       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
606       KnownBits BKnown(BitWidth);
607       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
608 
609       // For those bits in B that are known to be zero, we can propagate known
610       // bits from the RHS to V.
611       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
612       Known.One  |= RHSKnown.One  & BKnown.Zero;
613     // assume(~(v | b) = a)
614     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
615                                    m_Value(A))) &&
616                Pred == ICmpInst::ICMP_EQ &&
617                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
618       KnownBits RHSKnown(BitWidth);
619       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
620       KnownBits BKnown(BitWidth);
621       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
622 
623       // For those bits in B that are known to be zero, we can propagate
624       // inverted known bits from the RHS to V.
625       Known.Zero |= RHSKnown.One  & BKnown.Zero;
626       Known.One  |= RHSKnown.Zero & BKnown.Zero;
627     // assume(v ^ b = a)
628     } else if (match(Arg,
629                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
630                Pred == ICmpInst::ICMP_EQ &&
631                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
632       KnownBits RHSKnown(BitWidth);
633       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
634       KnownBits BKnown(BitWidth);
635       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
636 
637       // For those bits in B that are known to be zero, we can propagate known
638       // bits from the RHS to V. For those bits in B that are known to be one,
639       // we can propagate inverted known bits from the RHS to V.
640       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
641       Known.One  |= RHSKnown.One  & BKnown.Zero;
642       Known.Zero |= RHSKnown.One  & BKnown.One;
643       Known.One  |= RHSKnown.Zero & BKnown.One;
644     // assume(~(v ^ b) = a)
645     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
646                                    m_Value(A))) &&
647                Pred == ICmpInst::ICMP_EQ &&
648                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
649       KnownBits RHSKnown(BitWidth);
650       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
651       KnownBits BKnown(BitWidth);
652       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
653 
654       // For those bits in B that are known to be zero, we can propagate
655       // inverted known bits from the RHS to V. For those bits in B that are
656       // known to be one, we can propagate known bits from the RHS to V.
657       Known.Zero |= RHSKnown.One  & BKnown.Zero;
658       Known.One  |= RHSKnown.Zero & BKnown.Zero;
659       Known.Zero |= RHSKnown.Zero & BKnown.One;
660       Known.One  |= RHSKnown.One  & BKnown.One;
661     // assume(v << c = a)
662     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
663                                    m_Value(A))) &&
664                Pred == ICmpInst::ICMP_EQ &&
665                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
666       KnownBits RHSKnown(BitWidth);
667       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
668       // For those bits in RHS that are known, we can propagate them to known
669       // bits in V shifted to the right by C.
670       RHSKnown.Zero.lshrInPlace(C->getZExtValue());
671       Known.Zero |= RHSKnown.Zero;
672       RHSKnown.One.lshrInPlace(C->getZExtValue());
673       Known.One  |= RHSKnown.One;
674     // assume(~(v << c) = a)
675     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
676                                    m_Value(A))) &&
677                Pred == ICmpInst::ICMP_EQ &&
678                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
679       KnownBits RHSKnown(BitWidth);
680       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
681       // For those bits in RHS that are known, we can propagate them inverted
682       // to known bits in V shifted to the right by C.
683       RHSKnown.One.lshrInPlace(C->getZExtValue());
684       Known.Zero |= RHSKnown.One;
685       RHSKnown.Zero.lshrInPlace(C->getZExtValue());
686       Known.One  |= RHSKnown.Zero;
687     // assume(v >> c = a)
688     } else if (match(Arg,
689                      m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
690                                                 m_AShr(m_V, m_ConstantInt(C))),
691                               m_Value(A))) &&
692                Pred == ICmpInst::ICMP_EQ &&
693                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
694       KnownBits RHSKnown(BitWidth);
695       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
696       // For those bits in RHS that are known, we can propagate them to known
697       // bits in V shifted to the right by C.
698       Known.Zero |= RHSKnown.Zero << C->getZExtValue();
699       Known.One  |= RHSKnown.One  << C->getZExtValue();
700     // assume(~(v >> c) = a)
701     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
702                                              m_LShr(m_V, m_ConstantInt(C)),
703                                              m_AShr(m_V, m_ConstantInt(C)))),
704                                    m_Value(A))) &&
705                Pred == ICmpInst::ICMP_EQ &&
706                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
707       KnownBits RHSKnown(BitWidth);
708       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
709       // For those bits in RHS that are known, we can propagate them inverted
710       // to known bits in V shifted to the right by C.
711       Known.Zero |= RHSKnown.One  << C->getZExtValue();
712       Known.One  |= RHSKnown.Zero << C->getZExtValue();
713     // assume(v >=_s c) where c is non-negative
714     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
715                Pred == ICmpInst::ICMP_SGE &&
716                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
717       KnownBits RHSKnown(BitWidth);
718       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
719 
720       if (RHSKnown.isNonNegative()) {
721         // We know that the sign bit is zero.
722         Known.makeNonNegative();
723       }
724     // assume(v >_s c) where c is at least -1.
725     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
726                Pred == ICmpInst::ICMP_SGT &&
727                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
728       KnownBits RHSKnown(BitWidth);
729       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
730 
731       if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
732         // We know that the sign bit is zero.
733         Known.makeNonNegative();
734       }
735     // assume(v <=_s c) where c is negative
736     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
737                Pred == ICmpInst::ICMP_SLE &&
738                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
739       KnownBits RHSKnown(BitWidth);
740       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
741 
742       if (RHSKnown.isNegative()) {
743         // We know that the sign bit is one.
744         Known.makeNegative();
745       }
746     // assume(v <_s c) where c is non-positive
747     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
748                Pred == ICmpInst::ICMP_SLT &&
749                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
750       KnownBits RHSKnown(BitWidth);
751       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
752 
753       if (RHSKnown.isZero() || RHSKnown.isNegative()) {
754         // We know that the sign bit is one.
755         Known.makeNegative();
756       }
757     // assume(v <=_u c)
758     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
759                Pred == ICmpInst::ICMP_ULE &&
760                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
761       KnownBits RHSKnown(BitWidth);
762       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
763 
764       // Whatever high bits in c are zero are known to be zero.
765       Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
766       // assume(v <_u c)
767     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
768                Pred == ICmpInst::ICMP_ULT &&
769                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
770       KnownBits RHSKnown(BitWidth);
771       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
772 
773       // Whatever high bits in c are zero are known to be zero (if c is a power
774       // of 2, then one more).
775       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
776         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
777       else
778         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
779     }
780   }
781 
782   // If assumptions conflict with each other or previous known bits, then we
783   // have a logical fallacy. It's possible that the assumption is not reachable,
784   // so this isn't a real bug. On the other hand, the program may have undefined
785   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
786   // clear out the known bits, try to warn the user, and hope for the best.
787   if (Known.Zero.intersects(Known.One)) {
788     Known.resetAll();
789 
790     if (Q.ORE) {
791       auto *CxtI = const_cast<Instruction *>(Q.CxtI);
792       OptimizationRemarkAnalysis ORA("value-tracking", "BadAssumption", CxtI);
793       Q.ORE->emit(ORA << "Detected conflicting code assumptions. Program may "
794                          "have undefined behavior, or compiler may have "
795                          "internal error.");
796     }
797   }
798 }
799 
// Compute known bits from a shift operator, including those with a
// non-constant shift amount. Known is the output of this function. Known2 is a
// pre-allocated temporary with the same bit width as Known. KZF and KOF are
// operator-specific functors that, given the known-zero or known-one bits
// respectively, and a shift amount, compute the implied known-zero or known-one
// bits of the shift operator's result for that shift amount. The results from
// calling KZF and KOF are conservatively combined over all permitted shift
// amounts.
808 static void computeKnownBitsFromShiftOperator(
809     const Operator *I, KnownBits &Known, KnownBits &Known2,
810     unsigned Depth, const Query &Q,
811     function_ref<APInt(const APInt &, unsigned)> KZF,
812     function_ref<APInt(const APInt &, unsigned)> KOF) {
813   unsigned BitWidth = Known.getBitWidth();
814 
815   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
816     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
817 
818     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
819     Known.Zero = KZF(Known.Zero, ShiftAmt);
820     Known.One  = KOF(Known.One, ShiftAmt);
821     // If there is conflict between Known.Zero and Known.One, this must be an
822     // overflowing left shift, so the shift result is undefined. Clear Known
823     // bits so that other code could propagate this undef.
824     if ((Known.Zero & Known.One) != 0)
825       Known.resetAll();
826 
827     return;
828   }
829 
830   computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
831 
832   // If the shift amount could be greater than or equal to the bit-width of the LHS, the
833   // value could be undef, so we don't know anything about it.
834   if ((~Known.Zero).uge(BitWidth)) {
835     Known.resetAll();
836     return;
837   }
838 
839   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
840   // BitWidth > 64 and any upper bits are known, we'll end up returning the
841   // limit value (which implies all bits are known).
842   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
843   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
844 
  // It would be more clearly correct to use two fresh temporaries for this
  // calculation, but we reuse the APInts here to prevent unnecessary
  // allocations.
847   Known.resetAll();
848 
849   // If we know the shifter operand is nonzero, we can sometimes infer more
850   // known bits. However this is expensive to compute, so be lazy about it and
851   // only compute it when absolutely necessary.
852   Optional<bool> ShifterOperandIsNonZero;
853 
854   // Early exit if we can't constrain any well-defined shift amount.
855   if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
856     ShifterOperandIsNonZero =
857         isKnownNonZero(I->getOperand(1), Depth + 1, Q);
858     if (!*ShifterOperandIsNonZero)
859       return;
860   }
861 
862   computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
863 
864   Known.Zero.setAllBits();
865   Known.One.setAllBits();
866   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
867     // Combine the shifted known input bits only for those shift amounts
868     // compatible with its known constraints.
869     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
870       continue;
871     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
872       continue;
873     // If we know the shifter is nonzero, we may be able to infer more known
874     // bits. This check is sunk down as far as possible to avoid the expensive
875     // call to isKnownNonZero if the cheaper checks above fail.
876     if (ShiftAmt == 0) {
877       if (!ShifterOperandIsNonZero.hasValue())
878         ShifterOperandIsNonZero =
879             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
880       if (*ShifterOperandIsNonZero)
881         continue;
882     }
883 
884     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
885     Known.One  &= KOF(Known2.One, ShiftAmt);
886   }
887 
888   // If there are no compatible shift amounts, then we've proven that the shift
889   // amount must be >= the BitWidth, and the result is undefined. We could
890   // return anything we'd like, but we need to make sure the sets of known bits
891   // stay disjoint (it should be better for some other code to actually
892   // propagate the undef than to pick a value here using known bits).
893   if (Known.Zero.intersects(Known.One))
894     Known.resetAll();
895 }
896 
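/// Compute the known bits of the result of operator I by dispatching on its
/// opcode and combining the known bits of its operands.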
897 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
898                                          unsigned Depth, const Query &Q) {
899   unsigned BitWidth = Known.getBitWidth();
900 
901   KnownBits Known2(Known);
902   switch (I->getOpcode()) {
903   default: break;
904   case Instruction::Load:
905     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
906       computeKnownBitsFromRangeMetadata(*MD, Known);
907     break;
908   case Instruction::And: {
909     // If either the LHS or the RHS are Zero, the result is zero.
910     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
911     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
912 
913     // Output known-1 bits are only known if set in both the LHS & RHS.
914     Known.One &= Known2.One;
915     // Output known-0 are known to be clear if zero in either the LHS | RHS.
916     Known.Zero |= Known2.Zero;
917 
918     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
919     // here we handle the more general case of adding any odd number by
920     // matching the form add(x, add(x, y)) where y is odd.
921     // TODO: This could be generalized to clearing any bit set in y where the
922     // following bit is known to be unset in y.
923     Value *Y = nullptr;
924     if (!Known.Zero[0] && !Known.One[0] &&
925         (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
926                                        m_Value(Y))) ||
927          match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
928                                        m_Value(Y))))) {
929       Known2.resetAll();
930       computeKnownBits(Y, Known2, Depth + 1, Q);
931       if (Known2.countMinTrailingOnes() > 0)
932         Known.Zero.setBit(0);
933     }
934     break;
935   }
936   case Instruction::Or: {
937     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
938     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
939 
940     // Output known-0 bits are only known if clear in both the LHS & RHS.
941     Known.Zero &= Known2.Zero;
942     // Output known-1 are known to be set if set in either the LHS | RHS.
943     Known.One |= Known2.One;
944     break;
945   }
946   case Instruction::Xor: {
947     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
948     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
949 
950     // Output known-0 bits are known if clear or set in both the LHS & RHS.
951     APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
952     // Output known-1 are known to be set if set in only one of the LHS, RHS.
953     Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
954     Known.Zero = std::move(KnownZeroOut);
955     break;
956   }
957   case Instruction::Mul: {
958     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
959     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
960                         Known2, Depth, Q);
961     break;
962   }
963   case Instruction::UDiv: {
964     // For the purposes of computing leading zeros we can conservatively
965     // treat a udiv as a logical right shift by the power of 2 known to
966     // be less than the denominator.
967     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
968     unsigned LeadZ = Known2.countMinLeadingZeros();
969 
970     Known2.resetAll();
971     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
972     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
973     if (RHSMaxLeadingZeros != BitWidth)
974       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
975 
976     Known.Zero.setHighBits(LeadZ);
977     break;
978   }
979   case Instruction::Select: {
980     const Value *LHS, *RHS;
981     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
982     if (SelectPatternResult::isMinOrMax(SPF)) {
983       computeKnownBits(RHS, Known, Depth + 1, Q);
984       computeKnownBits(LHS, Known2, Depth + 1, Q);
985     } else {
986       computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
987       computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
988     }
989 
990     unsigned MaxHighOnes = 0;
991     unsigned MaxHighZeros = 0;
992     if (SPF == SPF_SMAX) {
993       // If both sides are negative, the result is negative.
994       if (Known.isNegative() && Known2.isNegative())
995         // We can derive a lower bound on the result by taking the max of the
996         // leading one bits.
997         MaxHighOnes =
998             std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
999       // If either side is non-negative, the result is non-negative.
1000       else if (Known.isNonNegative() || Known2.isNonNegative())
1001         MaxHighZeros = 1;
1002     } else if (SPF == SPF_SMIN) {
1003       // If both sides are non-negative, the result is non-negative.
1004       if (Known.isNonNegative() && Known2.isNonNegative())
1005         // We can derive an upper bound on the result by taking the max of the
1006         // leading zero bits.
1007         MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1008                                 Known2.countMinLeadingZeros());
1009       // If either side is negative, the result is negative.
1010       else if (Known.isNegative() || Known2.isNegative())
1011         MaxHighOnes = 1;
1012     } else if (SPF == SPF_UMAX) {
1013       // We can derive a lower bound on the result by taking the max of the
1014       // leading one bits.
1015       MaxHighOnes =
1016           std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1017     } else if (SPF == SPF_UMIN) {
1018       // We can derive an upper bound on the result by taking the max of the
1019       // leading zero bits.
1020       MaxHighZeros =
1021           std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1022     }
1023 
1024     // Only known if known in both the LHS and RHS.
1025     Known.One &= Known2.One;
1026     Known.Zero &= Known2.Zero;
1027     if (MaxHighOnes > 0)
1028       Known.One.setHighBits(MaxHighOnes);
1029     if (MaxHighZeros > 0)
1030       Known.Zero.setHighBits(MaxHighZeros);
1031     break;
1032   }
1033   case Instruction::FPTrunc:
1034   case Instruction::FPExt:
1035   case Instruction::FPToUI:
1036   case Instruction::FPToSI:
1037   case Instruction::SIToFP:
1038   case Instruction::UIToFP:
1039     break; // Can't work with floating point.
1040   case Instruction::PtrToInt:
1041   case Instruction::IntToPtr:
1042     // Fall through and handle them the same as zext/trunc.
1043     LLVM_FALLTHROUGH;
1044   case Instruction::ZExt:
1045   case Instruction::Trunc: {
1046     Type *SrcTy = I->getOperand(0)->getType();
1047 
1048     unsigned SrcBitWidth;
1049     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1050     // which fall through here.
1051     SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
1052 
1053     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1054     Known = Known.zextOrTrunc(SrcBitWidth);
1055     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1056     Known = Known.zextOrTrunc(BitWidth);
1057     // Any top bits are known to be zero.
1058     if (BitWidth > SrcBitWidth)
1059       Known.Zero.setBitsFrom(SrcBitWidth);
1060     break;
1061   }
1062   case Instruction::BitCast: {
1063     Type *SrcTy = I->getOperand(0)->getType();
1064     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1065         // TODO: For now, not handling conversions like:
1066         // (bitcast i64 %x to <2 x i32>)
1067         !I->getType()->isVectorTy()) {
1068       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1069       break;
1070     }
1071     break;
1072   }
1073   case Instruction::SExt: {
1074     // Compute the bits in the result that are not present in the input.
1075     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1076 
1077     Known = Known.trunc(SrcBitWidth);
1078     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1079     // If the sign bit of the input is known set or clear, then we know the
1080     // top bits of the result.
1081     Known = Known.sext(BitWidth);
1082     break;
1083   }
1084   case Instruction::Shl: {
1085     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1086     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1087     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1088       APInt KZResult = KnownZero << ShiftAmt;
1089       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has the "nsw" keyword, then the result is either a
      // poison value or has the same sign bit as the first operand.
1092       if (NSW && KnownZero.isSignBitSet())
1093         KZResult.setSignBit();
1094       return KZResult;
1095     };
1096 
1097     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1098       APInt KOResult = KnownOne << ShiftAmt;
1099       if (NSW && KnownOne.isSignBitSet())
1100         KOResult.setSignBit();
1101       return KOResult;
1102     };
1103 
1104     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1105     break;
1106   }
1107   case Instruction::LShr: {
1108     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1109     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1110       APInt KZResult = KnownZero.lshr(ShiftAmt);
1111       // High bits known zero.
1112       KZResult.setHighBits(ShiftAmt);
1113       return KZResult;
1114     };
1115 
1116     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1117       return KnownOne.lshr(ShiftAmt);
1118     };
1119 
1120     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1121     break;
1122   }
1123   case Instruction::AShr: {
1124     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1125     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1126       return KnownZero.ashr(ShiftAmt);
1127     };
1128 
1129     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1130       return KnownOne.ashr(ShiftAmt);
1131     };
1132 
1133     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1134     break;
1135   }
1136   case Instruction::Sub: {
1137     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1138     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1139                            Known, Known2, Depth, Q);
1140     break;
1141   }
1142   case Instruction::Add: {
1143     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1144     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1145                            Known, Known2, Depth, Q);
1146     break;
1147   }
1148   case Instruction::SRem:
1149     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1150       APInt RA = Rem->getValue().abs();
1151       if (RA.isPowerOf2()) {
1152         APInt LowBits = RA - 1;
1153         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1154 
1155         // The low bits of the first operand are unchanged by the srem.
1156         Known.Zero = Known2.Zero & LowBits;
1157         Known.One = Known2.One & LowBits;
1158 
1159         // If the first operand is non-negative or has all low bits zero, then
1160         // the upper bits are all zero.
1161         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1162           Known.Zero |= ~LowBits;
1163 
1164         // If the first operand is negative and not all low bits are zero, then
1165         // the upper bits are all one.
1166         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1167           Known.One |= ~LowBits;
1168 
1169         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1170         break;
1171       }
1172     }
1173 
1174     // The sign bit is the LHS's sign bit, except when the result of the
1175     // remainder is zero.
1176     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1177     // If it's known zero, our sign bit is also zero.
1178     if (Known2.isNonNegative())
1179       Known.makeNonNegative();
1180 
1181     break;
1182   case Instruction::URem: {
1183     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1184       const APInt &RA = Rem->getValue();
1185       if (RA.isPowerOf2()) {
1186         APInt LowBits = (RA - 1);
1187         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1188         Known.Zero |= ~LowBits;
1189         Known.One &= LowBits;
1190         break;
1191       }
1192     }
1193 
1194     // Since the result is less than or equal to either operand, any leading
1195     // zero bits in either operand must also exist in the result.
1196     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1197     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1198 
1199     unsigned Leaders =
1200         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1201     Known.resetAll();
1202     Known.Zero.setHighBits(Leaders);
1203     break;
1204   }
1205 
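  // The pointer produced by an alloca is at least as aligned as the alloca's
  // explicit alignment (or the ABI alignment of the allocated type), so the
  // corresponding number of low bits are known to be zero.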
1206   case Instruction::Alloca: {
1207     const AllocaInst *AI = cast<AllocaInst>(I);
1208     unsigned Align = AI->getAlignment();
1209     if (Align == 0)
1210       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1211 
1212     if (Align > 0)
1213       Known.Zero.setLowBits(countTrailingZeros(Align));
1214     break;
1215   }
1216   case Instruction::GetElementPtr: {
1217     // Analyze all of the subscripts of this getelementptr instruction
1218     // to determine if we can prove known low zero bits.
1219     KnownBits LocalKnown(BitWidth);
1220     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1221     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1222 
1223     gep_type_iterator GTI = gep_type_begin(I);
1224     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1225       Value *Index = I->getOperand(i);
1226       if (StructType *STy = GTI.getStructTypeOrNull()) {
1227         // Handle struct member offset arithmetic.
1228 
        // Handle the case when the index is a vector zeroinitializer.
1230         Constant *CIndex = cast<Constant>(Index);
1231         if (CIndex->isZeroValue())
1232           continue;
1233 
1234         if (CIndex->getType()->isVectorTy())
1235           Index = CIndex->getSplatValue();
1236 
1237         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1238         const StructLayout *SL = Q.DL.getStructLayout(STy);
1239         uint64_t Offset = SL->getElementOffset(Idx);
1240         TrailZ = std::min<unsigned>(TrailZ,
1241                                     countTrailingZeros(Offset));
1242       } else {
1243         // Handle array index arithmetic.
1244         Type *IndexedTy = GTI.getIndexedType();
1245         if (!IndexedTy->isSized()) {
1246           TrailZ = 0;
1247           break;
1248         }
1249         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1250         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1251         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1252         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1253         TrailZ = std::min(TrailZ,
1254                           unsigned(countTrailingZeros(TypeSize) +
1255                                    LocalKnown.countMinTrailingZeros()));
1256       }
1257     }
1258 
1259     Known.Zero.setLowBits(TrailZ);
1260     break;
1261   }
1262   case Instruction::PHI: {
1263     const PHINode *P = cast<PHINode>(I);
1264     // Handle the case of a simple two-predecessor recurrence PHI.
1265     // There's a lot more that could theoretically be done here, but
1266     // this is sufficient to catch some interesting cases.
1267     if (P->getNumIncomingValues() == 2) {
1268       for (unsigned i = 0; i != 2; ++i) {
1269         Value *L = P->getIncomingValue(i);
1270         Value *R = P->getIncomingValue(!i);
1271         Operator *LU = dyn_cast<Operator>(L);
1272         if (!LU)
1273           continue;
1274         unsigned Opcode = LU->getOpcode();
1275         // Check for operations that have the property that if
1276         // both their operands have low zero bits, the result
1277         // will have low zero bits.
1278         if (Opcode == Instruction::Add ||
1279             Opcode == Instruction::Sub ||
1280             Opcode == Instruction::And ||
1281             Opcode == Instruction::Or ||
1282             Opcode == Instruction::Mul) {
1283           Value *LL = LU->getOperand(0);
1284           Value *LR = LU->getOperand(1);
1285           // Find a recurrence.
1286           if (LL == I)
1287             L = LR;
1288           else if (LR == I)
1289             L = LL;
1290           else
1291             break;
1292           // Ok, we have a PHI of the form L op= R. Check for low
1293           // zero bits.
1294           computeKnownBits(R, Known2, Depth + 1, Q);
1295 
1296           // We need to take the minimum number of known bits
1297           KnownBits Known3(Known);
1298           computeKnownBits(L, Known3, Depth + 1, Q);
1299 
1300           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1301                                          Known3.countMinTrailingZeros()));
1302 
1303           if (DontImproveNonNegativePhiBits)
1304             break;
1305 
1306           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1307           if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
1308             // If initial value of recurrence is nonnegative, and we are adding
1309             // a nonnegative number with nsw, the result can only be nonnegative
1310             // or poison value regardless of the number of times we execute the
1311             // add in phi recurrence. If initial value is negative and we are
1312             // adding a negative number with nsw, the result can only be
1313             // negative or poison value. Similar arguments apply to sub and mul.
1314             //
1315             // (add non-negative, non-negative) --> non-negative
1316             // (add negative, negative) --> negative
1317             if (Opcode == Instruction::Add) {
1318               if (Known2.isNonNegative() && Known3.isNonNegative())
1319                 Known.makeNonNegative();
1320               else if (Known2.isNegative() && Known3.isNegative())
1321                 Known.makeNegative();
1322             }
1323 
1324             // (sub nsw non-negative, negative) --> non-negative
1325             // (sub nsw negative, non-negative) --> negative
1326             else if (Opcode == Instruction::Sub && LL == I) {
1327               if (Known2.isNonNegative() && Known3.isNegative())
1328                 Known.makeNonNegative();
1329               else if (Known2.isNegative() && Known3.isNonNegative())
1330                 Known.makeNegative();
1331             }
1332 
1333             // (mul nsw non-negative, non-negative) --> non-negative
1334             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1335                      Known3.isNonNegative())
1336               Known.makeNonNegative();
1337           }
1338 
1339           break;
1340         }
1341       }
1342     }
1343 
1344     // Unreachable blocks may have zero-operand PHI nodes.
1345     if (P->getNumIncomingValues() == 0)
1346       break;
1347 
    // Otherwise take the intersection of the known bits of the incoming
    // values, taking conservative care to avoid excessive recursion.
1350     if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value is undef or refers back to this PHI.
1352       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1353         break;
1354 
1355       Known.Zero.setAllBits();
1356       Known.One.setAllBits();
1357       for (Value *IncValue : P->incoming_values()) {
1358         // Skip direct self references.
1359         if (IncValue == P) continue;
1360 
1361         Known2 = KnownBits(BitWidth);
1362         // Recurse, but cap the recursion to one level, because we don't
1363         // want to waste time spinning around in loops.
1364         computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1365         Known.Zero &= Known2.Zero;
1366         Known.One &= Known2.One;
1367         // If all bits have been ruled out, there's no need to check
1368         // more operands.
1369         if (!Known.Zero && !Known.One)
1370           break;
1371       }
1372     }
1373     break;
1374   }
1375   case Instruction::Call:
1376   case Instruction::Invoke:
1377     // If range metadata is attached to this call, set known bits from that,
1378     // and then intersect with known bits based on other properties of the
1379     // function.
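    // For example, a call annotated with !range !{i32 0, i32 256} produces a
    // value in [0, 256), so its top 24 bits are known zero.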
1380     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1381       computeKnownBitsFromRangeMetadata(*MD, Known);
1382     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1383       computeKnownBits(RV, Known2, Depth + 1, Q);
1384       Known.Zero |= Known2.Zero;
1385       Known.One |= Known2.One;
1386     }
1387     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1388       switch (II->getIntrinsicID()) {
1389       default: break;
1390       case Intrinsic::bitreverse:
1391         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1392         Known.Zero |= Known2.Zero.reverseBits();
1393         Known.One |= Known2.One.reverseBits();
1394         break;
1395       case Intrinsic::bswap:
1396         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1397         Known.Zero |= Known2.Zero.byteSwap();
1398         Known.One |= Known2.One.byteSwap();
1399         break;
1400       case Intrinsic::ctlz: {
1401         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1402         // If we have a known 1, its position is our upper bound.
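        // For example, if bit 27 of an i32 operand is known to be one, there
        // can be at most four leading zeros, so the result fits in the low
        // three bits.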
1403         unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than BitWidth.
1405         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1406           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1407         unsigned LowBits = Log2_32(PossibleLZ)+1;
1408         Known.Zero.setBitsFrom(LowBits);
1409         break;
1410       }
1411       case Intrinsic::cttz: {
1412         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1413         // If we have a known 1, its position is our upper bound.
1414         unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than BitWidth.
1416         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1417           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1418         unsigned LowBits = Log2_32(PossibleTZ)+1;
1419         Known.Zero.setBitsFrom(LowBits);
1420         break;
1421       }
1422       case Intrinsic::ctpop: {
1423         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1424         // We can bound the space the count needs.  Also, bits known to be zero
1425         // can't contribute to the population.
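        // For example, if at most ten bits of the operand can possibly be
        // set, the population count is at most ten and fits in the low four
        // bits of the result.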
1426         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1427         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1428         Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound Known.One using the minimum number of bits
        // known to be set in the operand (Known2).
1431         break;
1432       }
1433       case Intrinsic::x86_sse42_crc32_64_64:
1434         Known.Zero.setBitsFrom(32);
1435         break;
1436       }
1437     }
1438     break;
1439   case Instruction::ExtractElement:
1440     // Look through extract element. At the moment we keep this simple and skip
1441     // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if the vector is
    // sign extended, shifted, etc.).
1444     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1445     break;
1446   case Instruction::ExtractValue:
1447     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1448       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1449       if (EVI->getNumIndices() != 1) break;
1450       if (EVI->getIndices()[0] == 0) {
1451         switch (II->getIntrinsicID()) {
1452         default: break;
1453         case Intrinsic::uadd_with_overflow:
1454         case Intrinsic::sadd_with_overflow:
1455           computeKnownBitsAddSub(true, II->getArgOperand(0),
1456                                  II->getArgOperand(1), false, Known, Known2,
1457                                  Depth, Q);
1458           break;
1459         case Intrinsic::usub_with_overflow:
1460         case Intrinsic::ssub_with_overflow:
1461           computeKnownBitsAddSub(false, II->getArgOperand(0),
1462                                  II->getArgOperand(1), false, Known, Known2,
1463                                  Depth, Q);
1464           break;
1465         case Intrinsic::umul_with_overflow:
1466         case Intrinsic::smul_with_overflow:
1467           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1468                               Known, Known2, Depth, Q);
1469           break;
1470         }
1471       }
1472     }
1473   }
1474 }
1475 
1476 /// Determine which bits of V are known to be either zero or one and return
1477 /// them.
1478 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1479   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1480   computeKnownBits(V, Known, Depth, Q);
1481   return Known;
1482 }
1483 
1484 /// Determine which bits of V are known to be either zero or one and return
1485 /// them in the Known bit set.
1486 ///
1487 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1488 /// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero.  If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
1491 /// Because instcombine aggressively folds operations with undef args anyway,
1492 /// this won't lose us code quality.
1493 ///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the elements in the vector.
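///
/// For example, for 'and i8 %x, 15' the top four bits of the result are known
/// zero and the low four bits are unknown (absent more information about %x).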
1499 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1500                       const Query &Q) {
1501   assert(V && "No Value?");
1502   assert(Depth <= MaxDepth && "Limit Search Depth");
1503   unsigned BitWidth = Known.getBitWidth();
1504 
1505   assert((V->getType()->isIntOrIntVectorTy() ||
1506           V->getType()->getScalarType()->isPointerTy()) &&
1507          "Not integer or pointer type!");
1508   assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1509          (!V->getType()->isIntOrIntVectorTy() ||
1510           V->getType()->getScalarSizeInBits() == BitWidth) &&
1511          "V and Known should have same BitWidth");
1512   (void)BitWidth;
1513 
1514   const APInt *C;
1515   if (match(V, m_APInt(C))) {
1516     // We know all of the bits for a scalar constant or a splat vector constant!
1517     Known.One = *C;
1518     Known.Zero = ~Known.One;
1519     return;
1520   }
1521   // Null and aggregate-zero are all-zeros.
1522   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1523     Known.setAllZero();
1524     return;
1525   }
1526   // Handle a constant vector by taking the intersection of the known bits of
1527   // each element.
1528   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1529     // We know that CDS must be a vector of integers. Take the intersection of
1530     // each element.
1531     Known.Zero.setAllBits(); Known.One.setAllBits();
1532     APInt Elt(BitWidth, 0);
1533     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1534       Elt = CDS->getElementAsInteger(i);
1535       Known.Zero &= ~Elt;
1536       Known.One &= Elt;
1537     }
1538     return;
1539   }
1540 
1541   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1542     // We know that CV must be a vector of integers. Take the intersection of
1543     // each element.
1544     Known.Zero.setAllBits(); Known.One.setAllBits();
1545     APInt Elt(BitWidth, 0);
1546     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1547       Constant *Element = CV->getAggregateElement(i);
1548       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1549       if (!ElementCI) {
1550         Known.resetAll();
1551         return;
1552       }
1553       Elt = ElementCI->getValue();
1554       Known.Zero &= ~Elt;
1555       Known.One &= Elt;
1556     }
1557     return;
1558   }
1559 
1560   // Start out not knowing anything.
1561   Known.resetAll();
1562 
1563   // We can't imply anything about undefs.
1564   if (isa<UndefValue>(V))
1565     return;
1566 
1567   // There's no point in looking through other users of ConstantData for
1568   // assumptions.  Confirm that we've handled them all.
1569   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1570 
1571   // Limit search depth.
1572   // All recursive calls that increase depth must come after this.
1573   if (Depth == MaxDepth)
1574     return;
1575 
1576   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1577   // the bits of its aliasee.
1578   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1579     if (!GA->isInterposable())
1580       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1581     return;
1582   }
1583 
1584   if (const Operator *I = dyn_cast<Operator>(V))
1585     computeKnownBitsFromOperator(I, Known, Depth, Q);
1586 
1587   // Aligned pointers have trailing zeros - refine Known.Zero set
1588   if (V->getType()->isPointerTy()) {
1589     unsigned Align = V->getPointerAlignment(Q.DL);
1590     if (Align)
1591       Known.Zero.setLowBits(countTrailingZeros(Align));
1592   }
1593 
1594   // computeKnownBitsFromAssume strictly refines Known.
  // Therefore, we run it after computeKnownBitsFromOperator.
1596 
1597   // Check whether a nearby assume intrinsic can determine some known bits.
1598   computeKnownBitsFromAssume(V, Known, Depth, Q);
1599 
1600   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1601 }
1602 
1603 /// Return true if the given value is known to have exactly one
1604 /// bit set when defined. For vectors return true if every element is known to
1605 /// be a power of two when defined. Supports values with integer or pointer
1606 /// types and vectors of integers.
1607 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1608                             const Query &Q) {
1609   if (const Constant *C = dyn_cast<Constant>(V)) {
1610     if (C->isNullValue())
1611       return OrZero;
1612 
1613     const APInt *ConstIntOrConstSplatInt;
1614     if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1615       return ConstIntOrConstSplatInt->isPowerOf2();
1616   }
1617 
1618   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1619   // it is shifted off the end then the result is undefined.
1620   if (match(V, m_Shl(m_One(), m_Value())))
1621     return true;
1622 
1623   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1624   // the bottom.  If it is shifted off the bottom then the result is undefined.
1625   if (match(V, m_LShr(m_SignMask(), m_Value())))
1626     return true;
1627 
1628   // The remaining tests are all recursive, so bail out if we hit the limit.
1629   if (Depth++ == MaxDepth)
1630     return false;
1631 
1632   Value *X = nullptr, *Y = nullptr;
1633   // A shift left or a logical shift right of a power of two is a power of two
1634   // or zero.
1635   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1636                  match(V, m_LShr(m_Value(X), m_Value()))))
1637     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1638 
1639   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1640     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1641 
1642   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1643     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1644            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1645 
1646   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1647     // A power of two and'd with anything is a power of two or zero.
1648     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1649         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1650       return true;
1651     // X & (-X) is always a power of two or zero.
1652     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1653       return true;
1654     return false;
1655   }
1656 
1657   // Adding a power-of-two or zero to the same power-of-two or zero yields
1658   // either the original power-of-two, a larger power-of-two or zero.
1659   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1660     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1661     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1662       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1663           match(X, m_And(m_Value(), m_Specific(Y))))
1664         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1665           return true;
1666       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1667           match(Y, m_And(m_Value(), m_Specific(X))))
1668         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1669           return true;
1670 
1671       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1672       KnownBits LHSBits(BitWidth);
1673       computeKnownBits(X, LHSBits, Depth, Q);
1674 
1675       KnownBits RHSBits(BitWidth);
1676       computeKnownBits(Y, RHSBits, Depth, Q);
1677       // If i8 V is a power of two or zero:
1678       //  ZeroBits: 1 1 1 0 1 1 1 1
1679       // ~ZeroBits: 0 0 0 1 0 0 0 0
1680       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1681         // If OrZero isn't set, we cannot give back a zero result.
1682         // Make sure either the LHS or RHS has a bit set.
1683         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1684           return true;
1685     }
1686   }
1687 
1688   // An exact divide or right shift can only shift off zero bits, so the result
1689   // is a power of two only if the first operand is a power of two and not
1690   // copying a sign bit (sdiv int_min, 2).
1691   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1692       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1693     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1694                                   Depth, Q);
1695   }
1696 
1697   return false;
1698 }
1699 
1700 /// \brief Test whether a GEP's result is known to be non-null.
1701 ///
1702 /// Uses properties inherent in a GEP to try to determine whether it is known
1703 /// to be non-null.
1704 ///
1705 /// Currently this routine does not support vector GEPs.
1706 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1707                               const Query &Q) {
1708   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1709     return false;
1710 
1711   // FIXME: Support vector-GEPs.
1712   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1713 
1714   // If the base pointer is non-null, we cannot walk to a null address with an
1715   // inbounds GEP in address space zero.
1716   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1717     return true;
1718 
1719   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1720   // If so, then the GEP cannot produce a null pointer, as doing so would
1721   // inherently violate the inbounds contract within address space zero.
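  // For example, 'getelementptr inbounds %struct.S, %struct.S* %p, i64 0,
  // i32 1' cannot produce a null pointer when field 1 is at a non-zero
  // offset, even if nothing is known about %p.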
1722   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1723        GTI != GTE; ++GTI) {
1724     // Struct types are easy -- they must always be indexed by a constant.
1725     if (StructType *STy = GTI.getStructTypeOrNull()) {
1726       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1727       unsigned ElementIdx = OpC->getZExtValue();
1728       const StructLayout *SL = Q.DL.getStructLayout(STy);
1729       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1730       if (ElementOffset > 0)
1731         return true;
1732       continue;
1733     }
1734 
1735     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1736     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1737       continue;
1738 
1739     // Fast path the constant operand case both for efficiency and so we don't
1740     // increment Depth when just zipping down an all-constant GEP.
1741     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1742       if (!OpC->isZero())
1743         return true;
1744       continue;
1745     }
1746 
1747     // We post-increment Depth here because while isKnownNonZero increments it
1748     // as well, when we pop back up that increment won't persist. We don't want
1749     // to recurse 10k times just because we have 10k GEP operands. We don't
1750     // bail completely out because we want to handle constant GEPs regardless
1751     // of depth.
1752     if (Depth++ >= MaxDepth)
1753       continue;
1754 
1755     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1756       return true;
1757   }
1758 
1759   return false;
1760 }
1761 
1762 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value?
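///
/// For example, with Value == 0, !range !{i32 1, i32 257} excludes it, while
/// !range !{i32 -1, i32 1} does not, since the half-open range [-1, 1)
/// contains zero.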
1765 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1766   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1767   assert(NumRanges >= 1);
1768   for (unsigned i = 0; i < NumRanges; ++i) {
1769     ConstantInt *Lower =
1770         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1771     ConstantInt *Upper =
1772         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1773     ConstantRange Range(Lower->getValue(), Upper->getValue());
1774     if (Range.contains(Value))
1775       return false;
1776   }
1777   return true;
1778 }
1779 
1780 /// Return true if the given value is known to be non-zero when defined. For
1781 /// vectors, return true if every element is known to be non-zero when
1782 /// defined. For pointers, if the context instruction and dominator tree are
1783 /// specified, perform context-sensitive analysis and return true if the
1784 /// pointer couldn't possibly be null at the specified instruction.
1785 /// Supports values with integer or pointer type and vectors of integers.
1786 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1787   if (auto *C = dyn_cast<Constant>(V)) {
1788     if (C->isNullValue())
1789       return false;
1790     if (isa<ConstantInt>(C))
1791       // Must be non-zero due to null test above.
1792       return true;
1793 
1794     // For constant vectors, check that all elements are undefined or known
1795     // non-zero to determine that the whole vector is known non-zero.
1796     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1797       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1798         Constant *Elt = C->getAggregateElement(i);
1799         if (!Elt || Elt->isNullValue())
1800           return false;
1801         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1802           return false;
1803       }
1804       return true;
1805     }
1806 
1807     return false;
1808   }
1809 
1810   if (auto *I = dyn_cast<Instruction>(V)) {
1811     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1812       // If the possible ranges don't contain zero, then the value is
1813       // definitely non-zero.
1814       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1815         const APInt ZeroValue(Ty->getBitWidth(), 0);
1816         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1817           return true;
1818       }
1819     }
1820   }
1821 
1822   // The remaining tests are all recursive, so bail out if we hit the limit.
1823   if (Depth++ >= MaxDepth)
1824     return false;
1825 
1826   // Check for pointer simplifications.
1827   if (V->getType()->isPointerTy()) {
1828     if (isKnownNonNullAt(V, Q.CxtI, Q.DT))
1829       return true;
1830     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1831       if (isGEPKnownNonNull(GEP, Depth, Q))
1832         return true;
1833   }
1834 
1835   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1836 
1837   // X | Y != 0 if X != 0 or Y != 0.
1838   Value *X = nullptr, *Y = nullptr;
1839   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1840     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1841 
1842   // ext X != 0 if X != 0.
1843   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1844     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1845 
1846   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1847   // if the lowest bit is shifted off the end.
1848   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1849     // shl nuw can't remove any non-zero bits.
1850     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1851     if (BO->hasNoUnsignedWrap())
1852       return isKnownNonZero(X, Depth, Q);
1853 
1854     KnownBits Known(BitWidth);
1855     computeKnownBits(X, Known, Depth, Q);
1856     if (Known.One[0])
1857       return true;
1858   }
1859   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1860   // defined if the sign bit is shifted off the end.
1861   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1862     // shr exact can only shift out zero bits.
1863     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1864     if (BO->isExact())
1865       return isKnownNonZero(X, Depth, Q);
1866 
1867     KnownBits Known = computeKnownBits(X, Depth, Q);
1868     if (Known.isNegative())
1869       return true;
1870 
1871     // If the shifter operand is a constant, and all of the bits shifted
1872     // out are known to be zero, and X is known non-zero then at least one
1873     // non-zero bit must remain.
1874     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1875       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1876       // Is there a known one in the portion not shifted out?
1877       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
1878         return true;
1879       // Are all the bits to be shifted out known zero?
1880       if (Known.countMinTrailingZeros() >= ShiftVal)
1881         return isKnownNonZero(X, Depth, Q);
1882     }
1883   }
1884   // div exact can only produce a zero if the dividend is zero.
1885   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1886     return isKnownNonZero(X, Depth, Q);
1887   }
1888   // X + Y.
1889   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1890     KnownBits XKnown = computeKnownBits(X, Depth, Q);
1891     KnownBits YKnown = computeKnownBits(Y, Depth, Q);
1892 
1893     // If X and Y are both non-negative (as signed values) then their sum is not
1894     // zero unless both X and Y are zero.
1895     if (XKnown.isNonNegative() && YKnown.isNonNegative())
1896       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1897         return true;
1898 
1899     // If X and Y are both negative (as signed values) then their sum is not
1900     // zero unless both X and Y equal INT_MIN.
1901     if (XKnown.isNegative() && YKnown.isNegative()) {
1902       APInt Mask = APInt::getSignedMaxValue(BitWidth);
1903       // The sign bit of X is set.  If some other bit is set then X is not equal
1904       // to INT_MIN.
1905       if (XKnown.One.intersects(Mask))
1906         return true;
1907       // The sign bit of Y is set.  If some other bit is set then Y is not equal
1908       // to INT_MIN.
1909       if (YKnown.One.intersects(Mask))
1910         return true;
1911     }
1912 
1913     // The sum of a non-negative number and a power of two is not zero.
1914     if (XKnown.isNonNegative() &&
1915         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1916       return true;
1917     if (YKnown.isNonNegative() &&
1918         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1919       return true;
1920   }
1921   // X * Y.
1922   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1923     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1924     // If X and Y are non-zero then so is X * Y as long as the multiplication
1925     // does not overflow.
1926     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1927         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1928       return true;
1929   }
1930   // (C ? X : Y) != 0 if X != 0 and Y != 0.
1931   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
1932     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1933         isKnownNonZero(SI->getFalseValue(), Depth, Q))
1934       return true;
1935   }
1936   // PHI
1937   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try to detect a recurrence that monotonically increases from a
1939     // starting value, as these are common as induction variables.
1940     if (PN->getNumIncomingValues() == 2) {
1941       Value *Start = PN->getIncomingValue(0);
1942       Value *Induction = PN->getIncomingValue(1);
1943       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1944         std::swap(Start, Induction);
1945       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1946         if (!C->isZero() && !C->isNegative()) {
1947           ConstantInt *X;
1948           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1949                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1950               !X->isNegative())
1951             return true;
1952         }
1953       }
1954     }
    // Check if all incoming values are non-zero constants.
1956     bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1957       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1958     });
1959     if (AllNonZeroConstants)
1960       return true;
1961   }
1962 
1963   KnownBits Known(BitWidth);
1964   computeKnownBits(V, Known, Depth, Q);
1965   return Known.One != 0;
1966 }
1967 
1968 /// Return true if V2 == V1 + X, where X is known non-zero.
1969 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
1970   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
1971   if (!BO || BO->getOpcode() != Instruction::Add)
1972     return false;
1973   Value *Op = nullptr;
1974   if (V2 == BO->getOperand(0))
1975     Op = BO->getOperand(1);
1976   else if (V2 == BO->getOperand(1))
1977     Op = BO->getOperand(0);
1978   else
1979     return false;
1980   return isKnownNonZero(Op, 0, Q);
1981 }
1982 
1983 /// Return true if it is known that V1 != V2.
1984 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
1985   if (V1 == V2)
1986     return false;
1987   if (V1->getType() != V2->getType())
1988     // We can't look through casts yet.
1989     return false;
1990   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
1991     return true;
1992 
1993   if (V1->getType()->isIntOrIntVectorTy()) {
1994     // Are any known bits in V1 contradictory to known bits in V2? If V1
1995     // has a known zero where V2 has a known one, they must not be equal.
1996     KnownBits Known1 = computeKnownBits(V1, 0, Q);
1997     KnownBits Known2 = computeKnownBits(V2, 0, Q);
1998 
1999     if (Known1.Zero.intersects(Known2.One) ||
2000         Known2.Zero.intersects(Known1.One))
2001       return true;
2002   }
2003   return false;
2004 }
2005 
2006 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2007 /// simplify operations downstream. Mask is known to be zero for bits that V
2008 /// cannot have.
2009 ///
2010 /// This function is defined on values with integer type, values with pointer
2011 /// type, and vectors of integers.  In the case
2012 /// where V is a vector, the mask, known zero, and known one values are the
2013 /// same width as the vector element, and the bit is set only if it is true
2014 /// for all of the elements in the vector.
2015 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2016                        const Query &Q) {
2017   KnownBits Known(Mask.getBitWidth());
2018   computeKnownBits(V, Known, Depth, Q);
2019   return Mask.isSubsetOf(Known.Zero);
2020 }
2021 
2022 /// For vector constants, loop over the elements and find the constant with the
2023 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2024 /// or if any element was not analyzed; otherwise, return the count for the
2025 /// element with the minimum number of sign bits.
2026 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2027                                                  unsigned TyBits) {
2028   const auto *CV = dyn_cast<Constant>(V);
2029   if (!CV || !CV->getType()->isVectorTy())
2030     return 0;
2031 
2032   unsigned MinSignBits = TyBits;
2033   unsigned NumElts = CV->getType()->getVectorNumElements();
2034   for (unsigned i = 0; i != NumElts; ++i) {
2035     // If we find a non-ConstantInt, bail out.
2036     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2037     if (!Elt)
2038       return 0;
2039 
2040     // If the sign bit is 1, flip the bits, so we always count leading zeros.
2041     APInt EltVal = Elt->getValue();
2042     if (EltVal.isNegative())
2043       EltVal = ~EltVal;
2044     MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
2045   }
2046 
2047   return MinSignBits;
2048 }
2049 
2050 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2051                                        const Query &Q);
2052 
2053 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2054                                    const Query &Q) {
2055   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2056   assert(Result > 0 && "At least one sign bit needs to be present!");
2057   return Result;
2058 }
2059 
2060 /// Return the number of times the sign bit of the register is replicated into
2061 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2062 /// (itself), but other cases can give us information. For example, immediately
2063 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2064 /// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits.
2066 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2067                                        const Query &Q) {
2068 
2069   // We return the minimum number of sign bits that are guaranteed to be present
2070   // in V, so for undef we have to conservatively return 1.  We don't have the
2071   // same behavior for poison though -- that's a FIXME today.
2072 
2073   unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2074   unsigned Tmp, Tmp2;
2075   unsigned FirstAnswer = 1;
2076 
2077   // Note that ConstantInt is handled by the general computeKnownBits case
2078   // below.
2079 
2080   if (Depth == MaxDepth)
2081     return 1;  // Limit search depth.
2082 
2083   const Operator *U = dyn_cast<Operator>(V);
2084   switch (Operator::getOpcode(V)) {
2085   default: break;
2086   case Instruction::SExt:
2087     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2088     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2089 
2090   case Instruction::SDiv: {
2091     const APInt *Denominator;
2092     // sdiv X, C -> adds log(C) sign bits.
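    // For example, dividing a value with three known sign bits by 8 yields
    // a result with at least six sign bits (3 + log2(8)).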
2093     if (match(U->getOperand(1), m_APInt(Denominator))) {
2094 
2095       // Ignore non-positive denominator.
2096       if (!Denominator->isStrictlyPositive())
2097         break;
2098 
2099       // Calculate the incoming numerator bits.
2100       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2101 
2102       // Add floor(log(C)) bits to the numerator bits.
2103       return std::min(TyBits, NumBits + Denominator->logBase2());
2104     }
2105     break;
2106   }
2107 
2108   case Instruction::SRem: {
2109     const APInt *Denominator;
2110     // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant.  This lets us put a lower bound on the number of sign
2112     // bits.
2113     if (match(U->getOperand(1), m_APInt(Denominator))) {
2114 
2115       // Ignore non-positive denominator.
2116       if (!Denominator->isStrictlyPositive())
2117         break;
2118 
2119       // Calculate the incoming numerator bits. SRem by a positive constant
2120       // can't lower the number of sign bits.
2121       unsigned NumrBits =
2122           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2123 
2124       // Calculate the leading sign bit constraints by examining the
2125       // denominator.  Given that the denominator is positive, there are two
2126       // cases:
2127       //
2128       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2129       //     (1 << ceilLogBase2(C)).
2130       //
2131       //  2. the numerator is negative.  Then the result range is (-C,0] and
2132       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2133       //
2134       // Thus a lower bound on the number of sign bits is `TyBits -
2135       // ceilLogBase2(C)`.
2136 
2137       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2138       return std::max(NumrBits, ResBits);
2139     }
2140     break;
2141   }
2142 
2143   case Instruction::AShr: {
2144     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2145     // ashr X, C   -> adds C sign bits.  Vectors too.
2146     const APInt *ShAmt;
2147     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2148       unsigned ShAmtLimited = ShAmt->getZExtValue();
2149       if (ShAmtLimited >= TyBits)
2150         break;  // Bad shift.
2151       Tmp += ShAmtLimited;
2152       if (Tmp > TyBits) Tmp = TyBits;
2153     }
2154     return Tmp;
2155   }
2156   case Instruction::Shl: {
2157     const APInt *ShAmt;
2158     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2159       // shl destroys sign bits.
2160       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2161       Tmp2 = ShAmt->getZExtValue();
2162       if (Tmp2 >= TyBits ||      // Bad shift.
2163           Tmp2 >= Tmp) break;    // Shifted all sign bits out.
2164       return Tmp - Tmp2;
2165     }
2166     break;
2167   }
2168   case Instruction::And:
2169   case Instruction::Or:
2170   case Instruction::Xor:    // NOT is handled here.
2171     // Logical binary ops preserve the number of sign bits at the worst.
2172     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2173     if (Tmp != 1) {
2174       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2175       FirstAnswer = std::min(Tmp, Tmp2);
2176       // We computed what we know about the sign bits as our first
2177       // answer. Now proceed to the generic code that uses
2178       // computeKnownBits, and pick whichever answer is better.
2179     }
2180     break;
2181 
2182   case Instruction::Select:
2183     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2184     if (Tmp == 1) return 1;  // Early out.
2185     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2186     return std::min(Tmp, Tmp2);
2187 
2188   case Instruction::Add:
2189     // Add can have at most one carry bit.  Thus we know that the output
2190     // is, at worst, one more bit than the inputs.
2191     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2192     if (Tmp == 1) return 1;  // Early out.
2193 
2194     // Special case decrementing a value (ADD X, -1):
2195     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2196       if (CRHS->isAllOnesValue()) {
2197         KnownBits Known(TyBits);
2198         computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2199 
2200         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2201         // sign bits set.
2202         if ((Known.Zero | 1).isAllOnesValue())
2203           return TyBits;
2204 
2205         // If we are subtracting one from a positive number, there is no carry
2206         // out of the result.
2207         if (Known.isNonNegative())
2208           return Tmp;
2209       }
2210 
2211     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2212     if (Tmp2 == 1) return 1;
2213     return std::min(Tmp, Tmp2)-1;
2214 
2215   case Instruction::Sub:
2216     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2217     if (Tmp2 == 1) return 1;
2218 
2219     // Handle NEG.
2220     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2221       if (CLHS->isNullValue()) {
2222         KnownBits Known(TyBits);
2223         computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2224         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2225         // sign bits set.
2226         if ((Known.Zero | 1).isAllOnesValue())
2227           return TyBits;
2228 
2229         // If the input is known to be positive (the sign bit is known clear),
2230         // the output of the NEG has the same number of sign bits as the input.
2231         if (Known.isNonNegative())
2232           return Tmp2;
2233 
2234         // Otherwise, we treat this like a SUB.
2235       }
2236 
2237     // Sub can have at most one carry bit.  Thus we know that the output
2238     // is, at worst, one more bit than the inputs.
2239     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2240     if (Tmp == 1) return 1;  // Early out.
2241     return std::min(Tmp, Tmp2)-1;
2242 
2243   case Instruction::PHI: {
2244     const PHINode *PN = cast<PHINode>(U);
2245     unsigned NumIncomingValues = PN->getNumIncomingValues();
2246     // Don't analyze large in-degree PHIs.
2247     if (NumIncomingValues > 4) break;
2248     // Unreachable blocks may have zero-operand PHI nodes.
2249     if (NumIncomingValues == 0) break;
2250 
2251     // Take the minimum of all incoming values.  This can't infinitely loop
2252     // because of our depth threshold.
2253     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2254     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2255       if (Tmp == 1) return Tmp;
2256       Tmp = std::min(
2257           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2258     }
2259     return Tmp;
2260   }
2261 
2262   case Instruction::Trunc:
2263     // FIXME: it's tricky to do anything useful for this, but it is an important
2264     // case for targets like X86.
2265     break;
2266 
2267   case Instruction::ExtractElement:
2268     // Look through extract element. At the moment we keep this simple and skip
2269     // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if the vector is
    // sign extended, shifted, etc.).
2272     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2273   }
2274 
2275   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2276   // use this information.
2277 
2278   // If we can examine all elements of a vector constant successfully, we're
2279   // done (we can't do any better than that). If not, keep trying.
2280   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2281     return VecSignBits;
2282 
2283   KnownBits Known(TyBits);
2284   computeKnownBits(V, Known, Depth, Q);
2285 
2286   // If we know that the sign bit is either zero or one, determine the number of
2287   // identical bits in the top of the input value.
2288   return std::max(FirstAnswer, Known.countMinSignBits());
2289 }
2290 
2291 /// This function computes the integer multiple of Base that equals V.
2292 /// If successful, it returns true and returns the multiple in
2293 /// Multiple. If unsuccessful, it returns false. It looks
2294 /// through SExt instructions only if LookThroughSExt is true.
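///
/// For example, for V = 'mul i32 %x, 4' and Base == 4, where %x is a
/// non-constant value, this returns true and sets Multiple to %x, since
/// V == 4 * %x.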
2295 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2296                            bool LookThroughSExt, unsigned Depth) {
2297   const unsigned MaxDepth = 6;
2298 
2299   assert(V && "No Value?");
2300   assert(Depth <= MaxDepth && "Limit Search Depth");
2301   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2302 
2303   Type *T = V->getType();
2304 
2305   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2306 
2307   if (Base == 0)
2308     return false;
2309 
2310   if (Base == 1) {
2311     Multiple = V;
2312     return true;
2313   }
2314 
2315   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2316   Constant *BaseVal = ConstantInt::get(T, Base);
2317   if (CO && CO == BaseVal) {
2318     // Multiple is 1.
2319     Multiple = ConstantInt::get(T, 1);
2320     return true;
2321   }
2322 
2323   if (CI && CI->getZExtValue() % Base == 0) {
2324     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2325     return true;
2326   }
2327 
2328   if (Depth == MaxDepth) return false;  // Limit search depth.
2329 
2330   Operator *I = dyn_cast<Operator>(V);
2331   if (!I) return false;
2332 
2333   switch (I->getOpcode()) {
2334   default: break;
2335   case Instruction::SExt:
2336     if (!LookThroughSExt) return false;
2337     // otherwise fall through to ZExt
2338     LLVM_FALLTHROUGH;
2339   case Instruction::ZExt:
2340     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2341                            LookThroughSExt, Depth+1);
2342   case Instruction::Shl:
2343   case Instruction::Mul: {
2344     Value *Op0 = I->getOperand(0);
2345     Value *Op1 = I->getOperand(1);
2346 
2347     if (I->getOpcode() == Instruction::Shl) {
2348       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2349       if (!Op1CI) return false;
2350       // Turn Op0 << Op1 into Op0 * 2^Op1
2351       APInt Op1Int = Op1CI->getValue();
2352       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2353       APInt API(Op1Int.getBitWidth(), 0);
2354       API.setBit(BitToSet);
2355       Op1 = ConstantInt::get(V->getContext(), API);
2356     }
2357 
2358     Value *Mul0 = nullptr;
2359     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2360       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2361         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2362           if (Op1C->getType()->getPrimitiveSizeInBits() <
2363               MulC->getType()->getPrimitiveSizeInBits())
2364             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2365           if (Op1C->getType()->getPrimitiveSizeInBits() >
2366               MulC->getType()->getPrimitiveSizeInBits())
2367             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2368 
2369           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2370           Multiple = ConstantExpr::getMul(MulC, Op1C);
2371           return true;
2372         }
2373 
2374       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2375         if (Mul0CI->getValue() == 1) {
2376           // V == Base * Op1, so return Op1
2377           Multiple = Op1;
2378           return true;
2379         }
2380     }
2381 
2382     Value *Mul1 = nullptr;
2383     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2384       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2385         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2386           if (Op0C->getType()->getPrimitiveSizeInBits() <
2387               MulC->getType()->getPrimitiveSizeInBits())
2388             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2389           if (Op0C->getType()->getPrimitiveSizeInBits() >
2390               MulC->getType()->getPrimitiveSizeInBits())
2391             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2392 
2393           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2394           Multiple = ConstantExpr::getMul(MulC, Op0C);
2395           return true;
2396         }
2397 
2398       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2399         if (Mul1CI->getValue() == 1) {
2400           // V == Base * Op0, so return Op0
2401           Multiple = Op0;
2402           return true;
2403         }
2404     }
2405   }
2406   }
2407 
2408   // We could not determine if V is a multiple of Base.
2409   return false;
2410 }
2411 
2412 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2413                                             const TargetLibraryInfo *TLI) {
2414   const Function *F = ICS.getCalledFunction();
2415   if (!F)
2416     return Intrinsic::not_intrinsic;
2417 
2418   if (F->isIntrinsic())
2419     return F->getIntrinsicID();
2420 
2421   if (!TLI)
2422     return Intrinsic::not_intrinsic;
2423 
2424   LibFunc Func;
  // We're going to make assumptions about the semantics of the functions, so
  // check that the target knows the function is available in this environment
  // and that it does not have local linkage.
2428   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2429     return Intrinsic::not_intrinsic;
2430 
2431   if (!ICS.onlyReadsMemory())
2432     return Intrinsic::not_intrinsic;
2433 
2434   // Otherwise check if we have a call to a function that can be turned into a
2435   // vector intrinsic.
2436   switch (Func) {
2437   default:
2438     break;
2439   case LibFunc_sin:
2440   case LibFunc_sinf:
2441   case LibFunc_sinl:
2442     return Intrinsic::sin;
2443   case LibFunc_cos:
2444   case LibFunc_cosf:
2445   case LibFunc_cosl:
2446     return Intrinsic::cos;
2447   case LibFunc_exp:
2448   case LibFunc_expf:
2449   case LibFunc_expl:
2450     return Intrinsic::exp;
2451   case LibFunc_exp2:
2452   case LibFunc_exp2f:
2453   case LibFunc_exp2l:
2454     return Intrinsic::exp2;
2455   case LibFunc_log:
2456   case LibFunc_logf:
2457   case LibFunc_logl:
2458     return Intrinsic::log;
2459   case LibFunc_log10:
2460   case LibFunc_log10f:
2461   case LibFunc_log10l:
2462     return Intrinsic::log10;
2463   case LibFunc_log2:
2464   case LibFunc_log2f:
2465   case LibFunc_log2l:
2466     return Intrinsic::log2;
2467   case LibFunc_fabs:
2468   case LibFunc_fabsf:
2469   case LibFunc_fabsl:
2470     return Intrinsic::fabs;
2471   case LibFunc_fmin:
2472   case LibFunc_fminf:
2473   case LibFunc_fminl:
2474     return Intrinsic::minnum;
2475   case LibFunc_fmax:
2476   case LibFunc_fmaxf:
2477   case LibFunc_fmaxl:
2478     return Intrinsic::maxnum;
2479   case LibFunc_copysign:
2480   case LibFunc_copysignf:
2481   case LibFunc_copysignl:
2482     return Intrinsic::copysign;
2483   case LibFunc_floor:
2484   case LibFunc_floorf:
2485   case LibFunc_floorl:
2486     return Intrinsic::floor;
2487   case LibFunc_ceil:
2488   case LibFunc_ceilf:
2489   case LibFunc_ceill:
2490     return Intrinsic::ceil;
2491   case LibFunc_trunc:
2492   case LibFunc_truncf:
2493   case LibFunc_truncl:
2494     return Intrinsic::trunc;
2495   case LibFunc_rint:
2496   case LibFunc_rintf:
2497   case LibFunc_rintl:
2498     return Intrinsic::rint;
2499   case LibFunc_nearbyint:
2500   case LibFunc_nearbyintf:
2501   case LibFunc_nearbyintl:
2502     return Intrinsic::nearbyint;
2503   case LibFunc_round:
2504   case LibFunc_roundf:
2505   case LibFunc_roundl:
2506     return Intrinsic::round;
2507   case LibFunc_pow:
2508   case LibFunc_powf:
2509   case LibFunc_powl:
2510     return Intrinsic::pow;
2511   case LibFunc_sqrt:
2512   case LibFunc_sqrtf:
2513   case LibFunc_sqrtl:
2514     if (ICS->hasNoNaNs())
2515       return Intrinsic::sqrt;
2516     return Intrinsic::not_intrinsic;
2517   }
2518 
2519   return Intrinsic::not_intrinsic;
2520 }
2521 
2522 /// Return true if we can prove that the specified FP value is never equal to
2523 /// -0.0.
2524 ///
2525 /// NOTE: this function will need to be revisited when we support non-default
2526 /// rounding modes!
2527 ///
2528 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2529                                 unsigned Depth) {
2530   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2531     return !CFP->getValueAPF().isNegZero();
2532 
2533   if (Depth == MaxDepth)
2534     return false;  // Limit search depth.
2535 
2536   const Operator *I = dyn_cast<Operator>(V);
2537   if (!I) return false;
2538 
2539   // Check if the nsz fast-math flag is set
2540   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2541     if (FPO->hasNoSignedZeros())
2542       return true;
2543 
2544   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2545   if (I->getOpcode() == Instruction::FAdd)
2546     if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2547       if (CFP->isNullValue())
2548         return true;
2549 
2550   // sitofp and uitofp turn into +0.0 for zero.
2551   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2552     return true;
2553 
2554   if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2555     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2556     switch (IID) {
2557     default:
2558       break;
2559     // sqrt(-0.0) = -0.0, no other negative results are possible.
2560     case Intrinsic::sqrt:
2561       return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2562     // fabs(x) != -0.0
2563     case Intrinsic::fabs:
2564       return true;
2565     }
2566   }
2567 
2568   return false;
2569 }
2570 
2571 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2572 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2573 /// bit despite comparing equal.
2574 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2575                                             const TargetLibraryInfo *TLI,
2576                                             bool SignBitOnly,
2577                                             unsigned Depth) {
2578   // TODO: This function does not do the right thing when SignBitOnly is true
2579   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2580   // which flips the sign bits of NaNs.  See
2581   // https://llvm.org/bugs/show_bug.cgi?id=31702.
2582 
2583   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2584     return !CFP->getValueAPF().isNegative() ||
2585            (!SignBitOnly && CFP->getValueAPF().isZero());
2586   }
2587 
2588   if (Depth == MaxDepth)
2589     return false; // Limit search depth.
2590 
2591   const Operator *I = dyn_cast<Operator>(V);
2592   if (!I)
2593     return false;
2594 
2595   switch (I->getOpcode()) {
2596   default:
2597     break;
2598   // Unsigned integers are always nonnegative.
2599   case Instruction::UIToFP:
2600     return true;
2601   case Instruction::FMul:
2602     // x*x is always non-negative or a NaN.
2603     if (I->getOperand(0) == I->getOperand(1) &&
2604         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2605       return true;
2606 
2607     LLVM_FALLTHROUGH;
2608   case Instruction::FAdd:
2609   case Instruction::FDiv:
2610   case Instruction::FRem:
2611     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2612                                            Depth + 1) &&
2613            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2614                                            Depth + 1);
2615   case Instruction::Select:
2616     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2617                                            Depth + 1) &&
2618            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2619                                            Depth + 1);
2620   case Instruction::FPExt:
2621   case Instruction::FPTrunc:
2622     // Widening/narrowing never change sign.
2623     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2624                                            Depth + 1);
2625   case Instruction::Call:
2626     const auto *CI = cast<CallInst>(I);
2627     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2628     switch (IID) {
2629     default:
2630       break;
2631     case Intrinsic::maxnum:
2632       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2633                                              Depth + 1) ||
2634              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2635                                              Depth + 1);
2636     case Intrinsic::minnum:
2637       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2638                                              Depth + 1) &&
2639              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2640                                              Depth + 1);
2641     case Intrinsic::exp:
2642     case Intrinsic::exp2:
2643     case Intrinsic::fabs:
2644       return true;
2645 
2646     case Intrinsic::sqrt:
2647       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
2648       if (!SignBitOnly)
2649         return true;
2650       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2651                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
2652 
2653     case Intrinsic::powi:
2654       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2655         // powi(x,n) is non-negative if n is even.
2656         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2657           return true;
2658       }
2659       // TODO: This is not correct.  Given that exp is an integer, here are the
2660       // ways that pow can return a negative value:
2661       //
2662       //   pow(x, exp)    --> negative if exp is odd and x is negative.
2663       //   pow(-0, exp)   --> -inf if exp is negative odd.
2664       //   pow(-0, exp)   --> -0 if exp is positive odd.
2665       //   pow(-inf, exp) --> -0 if exp is negative odd.
2666       //   pow(-inf, exp) --> -inf if exp is positive odd.
2667       //
2668       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2669       // but we must return false if x == -0.  Unfortunately we do not currently
2670       // have a way of expressing this constraint.  See details in
2671       // https://llvm.org/bugs/show_bug.cgi?id=31702.
2672       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2673                                              Depth + 1);
2674 
2675     case Intrinsic::fma:
2676     case Intrinsic::fmuladd:
2677       // x*x+y is non-negative if y is non-negative.
2678       return I->getOperand(0) == I->getOperand(1) &&
2679              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2680              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2681                                              Depth + 1);
2682     }
2683     break;
2684   }
2685   return false;
2686 }
2687 
2688 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2689                                        const TargetLibraryInfo *TLI) {
2690   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2691 }
2692 
2693 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2694   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2695 }
2696 
2697 /// If the specified value can be set by repeating the same byte in memory,
2698 /// return the i8 value that it is represented with.  This is
2699 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2700 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2701 /// byte store (e.g. i16 0x1234), return null.
2702 Value *llvm::isBytewiseValue(Value *V) {
2703   // All byte-wide stores are splatable, even of arbitrary variables.
2704   if (V->getType()->isIntegerTy(8)) return V;
2705 
2706   // Handle 'null' ConstantAggregateZero etc.
2707   if (Constant *C = dyn_cast<Constant>(V))
2708     if (C->isNullValue())
2709       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2710 
2711   // Constant float and double values can be handled as integer values if the
2712   // corresponding integer value is "byteable".  An important case is 0.0.
2713   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2714     if (CFP->getType()->isFloatTy())
2715       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2716     if (CFP->getType()->isDoubleTy())
2717       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2718     // Don't handle long double formats, which have strange constraints.
2719   }
2720 
2721   // We can handle constant integers whose width is a multiple of 8 bits.
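  // For example, i32 0xABABABAB is a repetition of the byte 0xAB and is
  // handled below, while i16 0x1234 is not byte-repeating and yields null.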
2722   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2723     if (CI->getBitWidth() % 8 == 0) {
2724       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2725 
2726       if (!CI->getValue().isSplat(8))
2727         return nullptr;
2728       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2729     }
2730   }
2731 
2732   // A ConstantDataArray/Vector is splatable if all its members are equal and
2733   // also splatable.
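  // For example, <4 x i8> <5, 5, 5, 5> yields i8 5, while <2 x i16> <5, 5>
  // yields null because i16 5 is not itself a repeated byte.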
2734   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2735     Value *Elt = CA->getElementAsConstant(0);
2736     Value *Val = isBytewiseValue(Elt);
2737     if (!Val)
2738       return nullptr;
2739 
2740     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2741       if (CA->getElementAsConstant(I) != Elt)
2742         return nullptr;
2743 
2744     return Val;
2745   }
2746 
2747   // Conceptually, we could handle things like:
2748   //   %a = zext i8 %X to i16
2749   //   %b = shl i16 %a, 8
2750   //   %c = or i16 %a, %b
2751   // but until there is an example that actually needs this, it doesn't seem
2752   // worth worrying about.
2753   return nullptr;
2754 }
2755 
2756 
2757 // This is the recursive version of BuildSubAggregate. It takes a few different
2758 // arguments. Idxs is the index path within the nested struct From that we are
2759 // looking at now (which indexes a value of type IndexedType). IdxSkip is the
2760 // number of indices from Idxs that should be left out when inserting into the
2761 // resulting struct. To is the result struct built so far, which new
2762 // insertvalue instructions build on.
2763 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2764                                 SmallVectorImpl<unsigned> &Idxs,
2765                                 unsigned IdxSkip,
2766                                 Instruction *InsertBefore) {
2767   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2768   if (STy) {
2769     // Save the original To argument so we can modify it
2770     Value *OrigTo = To;
2771     // General case, the type indexed by Idxs is a struct
2772     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2773       // Process each struct element recursively
2774       Idxs.push_back(i);
2775       Value *PrevTo = To;
2776       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2777                              InsertBefore);
2778       Idxs.pop_back();
2779       if (!To) {
2780         // Couldn't find any inserted value for this index? Cleanup
2781         while (PrevTo != OrigTo) {
2782           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2783           PrevTo = Del->getAggregateOperand();
2784           Del->eraseFromParent();
2785         }
2786         // Stop processing elements
2787         break;
2788       }
2789     }
2790     // If we successfully found a value for each of our subaggregates
2791     if (To)
2792       return To;
2793   }
2794   // Base case, the type indexed by Idxs is not a struct, or not all of
2795   // the struct's elements had a value that was inserted directly. In the latter
2796   // case, perhaps we can't determine each of the subelements individually, but
2797   // we might be able to find the complete struct somewhere.
2798 
2799   // Find the value that is at that particular spot
2800   Value *V = FindInsertedValue(From, Idxs);
2801 
2802   if (!V)
2803     return nullptr;
2804 
2805   // Insert the value into the new (sub) aggregate.
2806   return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2807                                        "tmp", InsertBefore);
2808 }
2809 
2810 // This helper takes a nested struct and extracts a part of it (which is again a
2811 // struct) into a new value. For example, given the struct:
2812 // { a, { b, { c, d }, e } }
2813 // and the indices "1, 1" this returns
2814 // { c, d }.
2815 //
2816 // It does this by inserting an insertvalue for each element in the resulting
2817 // struct, as opposed to just inserting a single struct. This will only work if
2818 // each of the elements of the substruct are known (ie, inserted into From by an
2819 // insertvalue instruction somewhere).
2820 //
2821 // All inserted insertvalue instructions are inserted before InsertBefore
2822 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2823                                 Instruction *InsertBefore) {
2824   assert(InsertBefore && "Must have someplace to insert!");
2825   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2826                                                              idx_range);
2827   Value *To = UndefValue::get(IndexedType);
2828   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2829   unsigned IdxSkip = Idxs.size();
2830 
2831   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2832 }
2833 
2834 /// Given an aggregate and a sequence of indices, see if the scalar value
2835 /// indexed is already around as a register, for example if it was inserted
2836 /// directly into the aggregate.
2837 ///
2838 /// If InsertBefore is not null, this function will duplicate (modified)
2839 /// insertvalues when a part of a nested struct is extracted.
2840 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2841                                Instruction *InsertBefore) {
2842   // Nothing to index? Just return V then (this is useful at the end of our
2843   // recursion).
2844   if (idx_range.empty())
2845     return V;
2846   // We have indices, so V should have an indexable type.
2847   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2848          "Not looking at a struct or array?");
2849   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2850          "Invalid indices for type?");
2851 
2852   if (Constant *C = dyn_cast<Constant>(V)) {
2853     C = C->getAggregateElement(idx_range[0]);
2854     if (!C) return nullptr;
2855     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2856   }
2857 
2858   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2859     // Loop the indices for the insertvalue instruction in parallel with the
2860     // requested indices
2861     const unsigned *req_idx = idx_range.begin();
2862     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2863          i != e; ++i, ++req_idx) {
2864       if (req_idx == idx_range.end()) {
2865         // We can't handle this without inserting insertvalues
2866         if (!InsertBefore)
2867           return nullptr;
2868 
2869         // The requested index identifies a part of a nested aggregate. Handle
2870         // this specially. For example,
2871         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2872         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2873         // %C = extractvalue {i32, { i32, i32 } } %B, 1
2874         // This can be changed into
2875         // %A = insertvalue {i32, i32 } undef, i32 10, 0
2876         // %C = insertvalue {i32, i32 } %A, i32 11, 1
2877         // which allows the unused 0,0 element from the nested struct to be
2878         // removed.
2879         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2880                                  InsertBefore);
2881       }
2882 
2883       // This insertvalue inserts something other than what we are looking
2884       // for. In that case, see if the (aggregate) value it was inserted into
2885       // has the value we are looking for.
2886       if (*req_idx != *i)
2887         return FindInsertedValue(I->getAggregateOperand(), idx_range,
2888                                  InsertBefore);
2889     }
2890     // If we end up here, the indices of the insertvalue match with those
2891     // requested (though possibly only partially). Now we recursively look at
2892     // the inserted value, passing any remaining indices.
2893     return FindInsertedValue(I->getInsertedValueOperand(),
2894                              makeArrayRef(req_idx, idx_range.end()),
2895                              InsertBefore);
2896   }
2897 
2898   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2899     // If we're extracting a value from an aggregate that was extracted from
2900     // something else, we can extract from that something else directly instead.
2901     // However, we will need to chain I's indices with the requested indices.
2902 
2903     // Calculate the number of indices required
2904     unsigned size = I->getNumIndices() + idx_range.size();
2905     // Allocate some space to put the new indices in
2906     SmallVector<unsigned, 5> Idxs;
2907     Idxs.reserve(size);
2908     // Add indices from the extract value instruction
2909     Idxs.append(I->idx_begin(), I->idx_end());
2910 
2911     // Add requested indices
2912     Idxs.append(idx_range.begin(), idx_range.end());
2913 
2914     assert(Idxs.size() == size
2915            && "Number of indices added not correct?");
2916 
2917     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2918   }
2919   // Otherwise, we don't know (such as, extracting from a function return value
2920   // or load instruction)
2921   return nullptr;
2922 }
2923 
2924 /// Analyze the specified pointer to see if it can be expressed as a base
2925 /// pointer plus a constant offset. Return the base and offset to the caller.
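/// For example, for a GEP that adds a constant 8 bytes to %p (possibly looking
/// through bitcasts and non-interposable aliases), this returns %p and sets
/// Offset to 8.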
2926 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2927                                               const DataLayout &DL) {
2928   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2929   APInt ByteOffset(BitWidth, 0);
2930 
2931   // We walk up the defs but use a visited set to handle unreachable code. In
2932   // that case, we stop after accumulating the cycle once (not that it
2933   // matters).
2934   SmallPtrSet<Value *, 16> Visited;
2935   while (Visited.insert(Ptr).second) {
2936     if (Ptr->getType()->isVectorTy())
2937       break;
2938 
2939     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2940       // If one of the values we have visited is an addrspacecast, then
2941       // the pointer type of this GEP may be different from the type
2942       // of the Ptr parameter which was passed to this function.  This
2943       // means when we construct GEPOffset, we need to use the size
2944       // of GEP's pointer type rather than the size of the original
2945       // pointer type.
2946       APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
2947       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2948         break;
2949 
2950       ByteOffset += GEPOffset.getSExtValue();
2951 
2952       Ptr = GEP->getPointerOperand();
2953     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2954                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2955       Ptr = cast<Operator>(Ptr)->getOperand(0);
2956     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2957       if (GA->isInterposable())
2958         break;
2959       Ptr = GA->getAliasee();
2960     } else {
2961       break;
2962     }
2963   }
2964   Offset = ByteOffset.getSExtValue();
2965   return Ptr;
2966 }
2967 
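// A GEP based on a pointer to a string constant typically looks like
//   getelementptr [n x i8], [n x i8]* @str, i64 0, i64 <idx>
// i.e. a zero first index into the array followed by the character index.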
2968 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
2969                                        unsigned CharSize) {
2970   // Make sure the GEP has exactly three arguments.
2971   if (GEP->getNumOperands() != 3)
2972     return false;
2973 
2974   // Make sure the index-ee is a pointer to an array of \p CharSize integers.
2976   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
2977   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
2978     return false;
2979 
2980   // Check to make sure that the first operand of the GEP is an integer and
2981   // has value 0 so that we are sure we're indexing into the initializer.
2982   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2983   if (!FirstIdx || !FirstIdx->isZero())
2984     return false;
2985 
2986   return true;
2987 }
2988 
2989 bool llvm::getConstantDataArrayInfo(const Value *V,
2990                                     ConstantDataArraySlice &Slice,
2991                                     unsigned ElementSize, uint64_t Offset) {
2992   assert(V);
2993 
2994   // Look through bitcast instructions and geps.
2995   V = V->stripPointerCasts();
2996 
2997   // If the value is a GEP instruction or constant expression, treat it as an
2998   // offset.
2999   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3000     // The GEP operator should be based on a pointer to a string constant, and
3001     // should be indexing into that string constant.
3002     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3003       return false;
3004 
3005     // If the second index isn't a ConstantInt, then this is a variable index
3006     // into the array.  If this occurs, we can't say anything meaningful about
3007     // the string.
3008     uint64_t StartIdx = 0;
3009     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3010       StartIdx = CI->getZExtValue();
3011     else
3012       return false;
3013     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3014                                     StartIdx + Offset);
3015   }
3016 
3017   // After stripping pointer casts and handling GEPs above, the value must
3018   // reference a global variable that is a constant and has a definitive
3019   // initializer. That initializer is the array we'll use for the optimization.
3020   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3021   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3022     return false;
3023 
3024   const ConstantDataArray *Array;
3025   ArrayType *ArrayTy;
3026   if (GV->getInitializer()->isNullValue()) {
3027     Type *GVTy = GV->getValueType();
3028     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3029       // A zeroinitializer for the array; there is no ConstantDataArray.
3030       Array = nullptr;
3031     } else {
3032       const DataLayout &DL = GV->getParent()->getDataLayout();
3033       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3034       uint64_t Length = SizeInBytes / (ElementSize / 8);
3035       if (Length <= Offset)
3036         return false;
3037 
3038       Slice.Array = nullptr;
3039       Slice.Offset = 0;
3040       Slice.Length = Length - Offset;
3041       return true;
3042     }
3043   } else {
3044     // This must be a ConstantDataArray.
3045     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3046     if (!Array)
3047       return false;
3048     ArrayTy = Array->getType();
3049   }
3050   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3051     return false;
3052 
3053   uint64_t NumElts = ArrayTy->getArrayNumElements();
3054   if (Offset > NumElts)
3055     return false;
3056 
3057   Slice.Array = Array;
3058   Slice.Offset = Offset;
3059   Slice.Length = NumElts - Offset;
3060   return true;
3061 }
3062 
3063 /// This function extracts the constant C string that V points to. If
3064 /// successful, it returns true and stores the string in Str; otherwise it
3065 /// returns false.
3066 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3067                                  uint64_t Offset, bool TrimAtNul) {
3068   ConstantDataArraySlice Slice;
3069   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3070     return false;
3071 
3072   if (Slice.Array == nullptr) {
3073     if (TrimAtNul) {
3074       Str = StringRef();
3075       return true;
3076     }
3077     if (Slice.Length == 1) {
3078       Str = StringRef("", 1);
3079       return true;
3080     }
3081     // We cannot instantiate a StringRef as we do not have an appropriate string
3082     // of 0s at hand.
3083     return false;
3084   }
3085 
3086   // Start out with the entire array in the StringRef.
3087   Str = Slice.Array->getAsString();
3088   // Skip over 'offset' bytes.
3089   Str = Str.substr(Slice.Offset);
3090 
3091   if (TrimAtNul) {
3092     // Trim off the \0 and anything after it.  If the array is not nul
3093     // terminated, we just return the whole remainder of the string.  The client
3094     // may know some other way that the string is length-bound.
3095     Str = Str.substr(0, Str.find('\0'));
3096   }
3097   return true;
3098 }
3099 
3100 // These next two are very similar to the above, but also look through PHI
3101 // nodes.
3102 // TODO: See if we can integrate these two together.
3103 
3104 /// If we can compute the length of the string pointed to by
3105 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3106 static uint64_t GetStringLengthH(const Value *V,
3107                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3108                                  unsigned CharSize) {
3109   // Look through noop bitcast instructions.
3110   V = V->stripPointerCasts();
3111 
3112   // If this is a PHI node, there are two cases: either we have already seen it
3113   // or we haven't.
3114   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3115     if (!PHIs.insert(PN).second)
3116       return ~0ULL;  // already in the set.
3117 
3118     // If it was new, see if all the input strings are the same length.
3119     uint64_t LenSoFar = ~0ULL;
3120     for (Value *IncValue : PN->incoming_values()) {
3121       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3122       if (Len == 0) return 0; // Unknown length -> unknown.
3123 
3124       if (Len == ~0ULL) continue;
3125 
3126       if (Len != LenSoFar && LenSoFar != ~0ULL)
3127         return 0;    // Disagree -> unknown.
3128       LenSoFar = Len;
3129     }
3130 
3131     // Success, all agree.
3132     return LenSoFar;
3133   }
3134 
3135   // strlen(select(c,x,y)) is known only when strlen(x) and strlen(y) agree.
3136   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3137     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3138     if (Len1 == 0) return 0;
3139     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3140     if (Len2 == 0) return 0;
3141     if (Len1 == ~0ULL) return Len2;
3142     if (Len2 == ~0ULL) return Len1;
3143     if (Len1 != Len2) return 0;
3144     return Len1;
3145   }
3146 
3147   // Otherwise, see if we can read the string.
3148   ConstantDataArraySlice Slice;
3149   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3150     return 0;
3151 
3152   if (Slice.Array == nullptr)
3153     return 1;
3154 
3155   // Search for nul characters
3156   unsigned NullIndex = 0;
3157   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3158     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3159       break;
3160   }
3161 
3162   return NullIndex + 1;
3163 }
3164 
3165 /// If we can compute the length of the string pointed to by
3166 /// the specified pointer, return 'len+1'.  If we can't, return 0.
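/// For example, a pointer to the constant string "foo" yields 4.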
3167 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3168   if (!V->getType()->isPointerTy()) return 0;
3169 
3170   SmallPtrSet<const PHINode*, 32> PHIs;
3171   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3172   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so treat
3173   // it as an empty string and return 1.
3174   return Len == ~0ULL ? 1 : Len;
3175 }
3176 
3177 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
3178 /// previous iteration of the loop was referring to the same object as \p PN.
3179 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3180                                          const LoopInfo *LI) {
3181   // Find the loop-defined value.
3182   Loop *L = LI->getLoopFor(PN->getParent());
3183   if (PN->getNumIncomingValues() != 2)
3184     return true;
3185 
3186   // Find the value from previous iteration.
3187   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3188   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3189     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3190   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3191     return true;
3192 
3193   // If a new pointer is loaded in the loop, the pointer references a different
3194   // object in every iteration.  E.g.:
3195   //    for (i)
3196   //       int *p = a[i];
3197   //       ...
3198   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3199     if (!L->isLoopInvariant(Load->getPointerOperand()))
3200       return false;
3201   return true;
3202 }
3203 
3204 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3205                                  unsigned MaxLookup) {
3206   if (!V->getType()->isPointerTy())
3207     return V;
3208   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3209     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3210       V = GEP->getPointerOperand();
3211     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3212                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3213       V = cast<Operator>(V)->getOperand(0);
3214     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3215       if (GA->isInterposable())
3216         return V;
3217       V = GA->getAliasee();
3218     } else if (isa<AllocaInst>(V)) {
3219       // An alloca can't be further simplified.
3220       return V;
3221     } else {
3222       if (auto CS = CallSite(V))
3223         if (Value *RV = CS.getReturnedArgOperand()) {
3224           V = RV;
3225           continue;
3226         }
3227 
3228       // See if InstructionSimplify knows any relevant tricks.
3229       if (Instruction *I = dyn_cast<Instruction>(V))
3230         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3231         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3232           V = Simplified;
3233           continue;
3234         }
3235 
3236       return V;
3237     }
3238     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3239   }
3240   return V;
3241 }
3242 
3243 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3244                                 const DataLayout &DL, LoopInfo *LI,
3245                                 unsigned MaxLookup) {
3246   SmallPtrSet<Value *, 4> Visited;
3247   SmallVector<Value *, 4> Worklist;
3248   Worklist.push_back(V);
3249   do {
3250     Value *P = Worklist.pop_back_val();
3251     P = GetUnderlyingObject(P, DL, MaxLookup);
3252 
3253     if (!Visited.insert(P).second)
3254       continue;
3255 
3256     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3257       Worklist.push_back(SI->getTrueValue());
3258       Worklist.push_back(SI->getFalseValue());
3259       continue;
3260     }
3261 
3262     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3263       // If this PHI changes the underlying object in every iteration of the
3264       // loop, don't look through it.  Consider:
3265       //   int **A;
3266       //   for (i) {
3267       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3268       //     Curr = A[i];
3269       //     *Prev, *Curr;
3270       //
3271       // Prev is tracking Curr one iteration behind so they refer to different
3272       // underlying objects.
3273       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3274           isSameUnderlyingObjectInLoop(PN, LI))
3275         for (Value *IncValue : PN->incoming_values())
3276           Worklist.push_back(IncValue);
3277       continue;
3278     }
3279 
3280     Objects.push_back(P);
3281   } while (!Worklist.empty());
3282 }
3283 
3284 /// Return true if the only users of this pointer are lifetime markers.
3285 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3286   for (const User *U : V->users()) {
3287     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3288     if (!II) return false;
3289 
3290     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3291         II->getIntrinsicID() != Intrinsic::lifetime_end)
3292       return false;
3293   }
3294   return true;
3295 }
3296 
3297 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3298                                         const Instruction *CtxI,
3299                                         const DominatorTree *DT) {
3300   const Operator *Inst = dyn_cast<Operator>(V);
3301   if (!Inst)
3302     return false;
3303 
3304   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3305     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3306       if (C->canTrap())
3307         return false;
3308 
3309   switch (Inst->getOpcode()) {
3310   default:
3311     return true;
3312   case Instruction::UDiv:
3313   case Instruction::URem: {
3314     // x / y is undefined if y == 0.
3315     const APInt *V;
3316     if (match(Inst->getOperand(1), m_APInt(V)))
3317       return *V != 0;
3318     return false;
3319   }
3320   case Instruction::SDiv:
3321   case Instruction::SRem: {
3322     // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
3323     const APInt *Numerator, *Denominator;
3324     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3325       return false;
3326     // We cannot hoist this division if the denominator is 0.
3327     if (*Denominator == 0)
3328       return false;
3329     // It's safe to hoist if the denominator is not 0 or -1.
3330     if (*Denominator != -1)
3331       return true;
3332     // At this point we know that the denominator is -1.  It is safe to hoist as
3333     // long we know that the numerator is not INT_MIN.
3334     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3335       return !Numerator->isMinSignedValue();
3336     // The numerator *might* be MinSignedValue.
3337     return false;
3338   }
3339   case Instruction::Load: {
3340     const LoadInst *LI = cast<LoadInst>(Inst);
3341     if (!LI->isUnordered() ||
3342         // Speculative load may create a race that did not exist in the source.
3343         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3344         // Speculative load may load data from dirty regions.
3345         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
3346       return false;
3347     const DataLayout &DL = LI->getModule()->getDataLayout();
3348     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3349                                               LI->getAlignment(), DL, CtxI, DT);
3350   }
3351   case Instruction::Call: {
3352     auto *CI = cast<const CallInst>(Inst);
3353     const Function *Callee = CI->getCalledFunction();
3354 
3355     // The called function could have undefined behavior or side-effects, even
3356     // if marked readnone nounwind.
3357     return Callee && Callee->isSpeculatable();
3358   }
3359   case Instruction::VAArg:
3360   case Instruction::Alloca:
3361   case Instruction::Invoke:
3362   case Instruction::PHI:
3363   case Instruction::Store:
3364   case Instruction::Ret:
3365   case Instruction::Br:
3366   case Instruction::IndirectBr:
3367   case Instruction::Switch:
3368   case Instruction::Unreachable:
3369   case Instruction::Fence:
3370   case Instruction::AtomicRMW:
3371   case Instruction::AtomicCmpXchg:
3372   case Instruction::LandingPad:
3373   case Instruction::Resume:
3374   case Instruction::CatchSwitch:
3375   case Instruction::CatchPad:
3376   case Instruction::CatchRet:
3377   case Instruction::CleanupPad:
3378   case Instruction::CleanupRet:
3379     return false; // Misc instructions which have effects
3380   }
3381 }
3382 
3383 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3384   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3385 }
3386 
3387 /// Return true if we know that the specified value is never null.
3388 bool llvm::isKnownNonNull(const Value *V) {
3389   assert(V->getType()->isPointerTy() && "V must be pointer type");
3390 
3391   // Alloca never returns null, malloc might.
3392   if (isa<AllocaInst>(V)) return true;
3393 
3394   // A byval, inalloca, or nonnull argument is never null.
3395   if (const Argument *A = dyn_cast<Argument>(V))
3396     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3397 
3398   // A global variable in address space 0 is non-null unless it is extern weak
3399   // or an absolute symbol reference. Other address spaces may have null as a
3400   // valid address for a global, so we can't assume anything.
3401   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3402     return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3403            GV->getType()->getAddressSpace() == 0;
3404 
3405   // A Load tagged with nonnull metadata is never null.
3406   if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3407     return LI->getMetadata(LLVMContext::MD_nonnull);
3408 
3409   if (auto CS = ImmutableCallSite(V))
3410     if (CS.isReturnNonNull())
3411       return true;
3412 
3413   return false;
3414 }
3415 
3416 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3417                                                   const Instruction *CtxI,
3418                                                   const DominatorTree *DT) {
3419   assert(V->getType()->isPointerTy() && "V must be pointer type");
3420   assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
3421   assert(CtxI && "Context instruction required for analysis");
3422   assert(DT && "Dominator tree required for analysis");
3423 
3424   unsigned NumUsesExplored = 0;
3425   for (auto *U : V->users()) {
3426     // Avoid massive lists
3427     if (NumUsesExplored >= DomConditionsMaxUses)
3428       break;
3429     NumUsesExplored++;
3430 
3431     // If the value is used as an argument to a call or invoke, then argument
3432     // attributes may provide an answer about null-ness.
3433     if (auto CS = ImmutableCallSite(U))
3434       if (auto *CalledFunc = CS.getCalledFunction())
3435         for (const Argument &Arg : CalledFunc->args())
3436           if (CS.getArgOperand(Arg.getArgNo()) == V &&
3437               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
3438             return true;
3439 
3440     // Consider only compare instructions uniquely controlling a branch
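    // For example, in
    //   %cmp = icmp ne i8* %p, null
    //   br i1 %cmp, label %nonnull_bb, label %null_bb
    // %p is known to be non-null within %nonnull_bb whenever that block is
    // reached only via this branch edge.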
3441     CmpInst::Predicate Pred;
3442     if (!match(const_cast<User *>(U),
3443                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
3444         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
3445       continue;
3446 
3447     for (auto *CmpU : U->users()) {
3448       if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
3449         assert(BI->isConditional() && "uses a comparison!");
3450 
3451         BasicBlock *NonNullSuccessor =
3452             BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
3453         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3454         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3455           return true;
3456       } else if (Pred == ICmpInst::ICMP_NE &&
3457                  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
3458                  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
3459         return true;
3460       }
3461     }
3462   }
3463 
3464   return false;
3465 }
3466 
3467 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3468                             const DominatorTree *DT) {
3469   if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
3470     return false;
3471 
3472   if (isKnownNonNull(V))
3473     return true;
3474 
3475   if (!CtxI || !DT)
3476     return false;
3477 
3478   return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
3479 }
3480 
3481 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3482                                                    const Value *RHS,
3483                                                    const DataLayout &DL,
3484                                                    AssumptionCache *AC,
3485                                                    const Instruction *CxtI,
3486                                                    const DominatorTree *DT) {
3487   // Multiplying an n-bit value by an m-bit value yields a result of at most
3488   // n + m significant bits. If the total number of significant bits does not
3489   // exceed the result bit width (minus 1), there is no overflow.
3490   // This means if we have enough leading zero bits in the operands
3491   // we can guarantee that the result does not overflow.
3492   // Ref: "Hacker's Delight" by Henry Warren
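  // For example, with 32-bit operands that each have at least 16 known leading
  // zero bits, ZeroBits below is >= 32, each operand is < 2^16, and the product
  // is < 2^32, so it cannot overflow.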
3493   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3494   KnownBits LHSKnown(BitWidth);
3495   KnownBits RHSKnown(BitWidth);
3496   computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3497   computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3498   // Note that underestimating the number of zero bits gives a more
3499   // conservative answer.
3500   unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3501                       RHSKnown.countMinLeadingZeros();
3502   // First handle the easy case: if we have enough zero bits there's
3503   // definitely no overflow.
3504   if (ZeroBits >= BitWidth)
3505     return OverflowResult::NeverOverflows;
3506 
3507   // Get the largest possible values for each operand.
3508   APInt LHSMax = ~LHSKnown.Zero;
3509   APInt RHSMax = ~RHSKnown.Zero;
3510 
3511   // We know the multiply operation doesn't overflow if the maximum values for
3512   // each operand will not overflow after we multiply them together.
3513   bool MaxOverflow;
3514   (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3515   if (!MaxOverflow)
3516     return OverflowResult::NeverOverflows;
3517 
3518   // We know it always overflows if multiplying the smallest possible values for
3519   // the operands also results in overflow.
3520   bool MinOverflow;
3521   (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3522   if (MinOverflow)
3523     return OverflowResult::AlwaysOverflows;
3524 
3525   return OverflowResult::MayOverflow;
3526 }
3527 
3528 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3529                                                    const Value *RHS,
3530                                                    const DataLayout &DL,
3531                                                    AssumptionCache *AC,
3532                                                    const Instruction *CxtI,
3533                                                    const DominatorTree *DT) {
3534   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3535   if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3536     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3537 
3538     if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3539       // The sign bit is set in both cases: this MUST overflow.
3541       return OverflowResult::AlwaysOverflows;
3542     }
3543 
3544     if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3545       // The sign bit is clear in both cases: this CANNOT overflow.
3547       return OverflowResult::NeverOverflows;
3548     }
3549   }
3550 
3551   return OverflowResult::MayOverflow;
3552 }
3553 
3554 /// \brief Return true if we can prove that adding the two values of the
3555 /// knownbits will not overflow.
3556 /// Otherwise return false.
3557 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3558                                     const KnownBits &RHSKnown) {
3559   // Addition of two 2's complement numbers having opposite signs will never
3560   // overflow.
3561   if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3562       (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3563     return true;
3564 
3565   // If either of the values is known to be non-negative, adding them can only
3566   // overflow if the second is also non-negative, so we can assume that.
3567   // Two non-negative numbers will only overflow if there is a carry to the
3568   // sign bit, so we can check if even when the values are as big as possible
3569   // there is no overflow to the sign bit.
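  // For example, with i8 operands each known to be at most 0x3F, the largest
  // possible sum is 0x3F + 0x3F = 0x7E, which leaves the sign bit clear, so the
  // addition cannot overflow.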
3570   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3571     APInt MaxLHS = ~LHSKnown.Zero;
3572     MaxLHS.clearSignBit();
3573     APInt MaxRHS = ~RHSKnown.Zero;
3574     MaxRHS.clearSignBit();
3575     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3576     return Result.isSignBitClear();
3577   }
3578 
3579   // If either of the values is known to be negative, adding them can only
3580   // overflow if the second is also negative, so we can assume that.
3581   // Two negative numbers will only overflow if there is no carry to the sign
3582   // bit, so we can check if even when the values are as small as possible
3583   // there is overflow to the sign bit.
3584   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3585     APInt MinLHS = LHSKnown.One;
3586     MinLHS.clearSignBit();
3587     APInt MinRHS = RHSKnown.One;
3588     MinRHS.clearSignBit();
3589     APInt Result = std::move(MinLHS) + std::move(MinRHS);
3590     return Result.isSignBitSet();
3591   }
3592 
3593   // If we reached here it means that we know nothing about the sign bits.
3594   // In this case we can't know if there will be an overflow, since by
3595   // changing the sign bits any two values can be made to overflow.
3596   return false;
3597 }
3598 
3599 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3600                                                   const Value *RHS,
3601                                                   const AddOperator *Add,
3602                                                   const DataLayout &DL,
3603                                                   AssumptionCache *AC,
3604                                                   const Instruction *CxtI,
3605                                                   const DominatorTree *DT) {
3606   if (Add && Add->hasNoSignedWrap()) {
3607     return OverflowResult::NeverOverflows;
3608   }
3609 
3610   // If LHS and RHS each have at least two sign bits, the addition will look
3611   // like
3612   //
3613   // XX..... +
3614   // YY.....
3615   //
3616   // If the carry into the most significant position is 0, X and Y can't both
3617   // be 1 and therefore the carry out of the addition is also 0.
3618   //
3619   // If the carry into the most significant position is 1, X and Y can't both
3620   // be 0 and therefore the carry out of the addition is also 1.
3621   //
3622   // Since the carry into the most significant position is always equal to
3623   // the carry out of the addition, there is no signed overflow.
3624   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3625       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3626     return OverflowResult::NeverOverflows;
3627 
3628   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3629   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3630 
3631   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3632     return OverflowResult::NeverOverflows;
3633 
3634   // The remaining code needs Add to be available. Return early if it is not.
3635   if (!Add)
3636     return OverflowResult::MayOverflow;
3637 
3638   // If the sign of Add is the same as at least one of the operands, this add
3639   // CANNOT overflow. This is particularly useful when the sum is
3640   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3641   // operands.
3642   bool LHSOrRHSKnownNonNegative =
3643       (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3644   bool LHSOrRHSKnownNegative =
3645       (LHSKnown.isNegative() || RHSKnown.isNegative());
3646   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3647     KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3648     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3649         (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3650       return OverflowResult::NeverOverflows;
3651     }
3652   }
3653 
3654   return OverflowResult::MayOverflow;
3655 }
3656 
3657 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3658                                      const DominatorTree &DT) {
3659 #ifndef NDEBUG
3660   auto IID = II->getIntrinsicID();
3661   assert((IID == Intrinsic::sadd_with_overflow ||
3662           IID == Intrinsic::uadd_with_overflow ||
3663           IID == Intrinsic::ssub_with_overflow ||
3664           IID == Intrinsic::usub_with_overflow ||
3665           IID == Intrinsic::smul_with_overflow ||
3666           IID == Intrinsic::umul_with_overflow) &&
3667          "Not an overflow intrinsic!");
3668 #endif
3669 
3670   SmallVector<const BranchInst *, 2> GuardingBranches;
3671   SmallVector<const ExtractValueInst *, 2> Results;
3672 
3673   for (const User *U : II->users()) {
3674     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3675       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3676 
3677       if (EVI->getIndices()[0] == 0)
3678         Results.push_back(EVI);
3679       else {
3680         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3681 
3682         for (const auto *U : EVI->users())
3683           if (const auto *B = dyn_cast<BranchInst>(U)) {
3684             assert(B->isConditional() && "How else is it using an i1?");
3685             GuardingBranches.push_back(B);
3686           }
3687       }
3688     } else {
3689       // We are using the aggregate directly in a way we don't want to analyze
3690       // here (storing it to a global, say).
3691       return false;
3692     }
3693   }
3694 
3695   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3696     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3697     if (!NoWrapEdge.isSingleEdge())
3698       return false;
3699 
3700     // Check if all users of the add are provably no-wrap.
3701     for (const auto *Result : Results) {
3702       // If the extractvalue itself is not executed on overflow, then we don't
3703       // need to check each use separately, since domination is transitive.
3704       if (DT.dominates(NoWrapEdge, Result->getParent()))
3705         continue;
3706 
3707       for (auto &RU : Result->uses())
3708         if (!DT.dominates(NoWrapEdge, RU))
3709           return false;
3710     }
3711 
3712     return true;
3713   };
3714 
3715   return any_of(GuardingBranches, AllUsesGuardedByBranch);
3716 }
3717 
3718 
3719 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3720                                                  const DataLayout &DL,
3721                                                  AssumptionCache *AC,
3722                                                  const Instruction *CxtI,
3723                                                  const DominatorTree *DT) {
3724   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3725                                        Add, DL, AC, CxtI, DT);
3726 }
3727 
3728 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3729                                                  const Value *RHS,
3730                                                  const DataLayout &DL,
3731                                                  AssumptionCache *AC,
3732                                                  const Instruction *CxtI,
3733                                                  const DominatorTree *DT) {
3734   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3735 }
3736 
3737 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3738   // A memory operation returns normally if it isn't volatile. A volatile
3739   // operation is allowed to trap.
3740   //
3741   // An atomic operation isn't guaranteed to return in a reasonable amount of
3742   // time because it's possible for another thread to interfere with it for an
3743   // arbitrary length of time, but programs aren't allowed to rely on that.
3744   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3745     return !LI->isVolatile();
3746   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3747     return !SI->isVolatile();
3748   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3749     return !CXI->isVolatile();
3750   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3751     return !RMWI->isVolatile();
3752   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3753     return !MII->isVolatile();
3754 
3755   // If there is no successor, then execution can't transfer to it.
3756   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3757     return !CRI->unwindsToCaller();
3758   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3759     return !CatchSwitch->unwindsToCaller();
3760   if (isa<ResumeInst>(I))
3761     return false;
3762   if (isa<ReturnInst>(I))
3763     return false;
3764   if (isa<UnreachableInst>(I))
3765     return false;
3766 
3767   // Calls can throw, or contain an infinite loop, or kill the process.
3768   if (auto CS = ImmutableCallSite(I)) {
3769     // Call sites that throw have implicit non-local control flow.
3770     if (!CS.doesNotThrow())
3771       return false;
3772 
3773     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3774     // etc. and thus not return.  However, LLVM already assumes that
3775     //
3776     //  - Thread exiting actions are modeled as writes to memory invisible to
3777     //    the program.
3778     //
3779     //  - Loops that don't have side effects (side effects are volatile/atomic
3780     //    stores and IO) always terminate (see http://llvm.org/PR965).
3781     //    Furthermore IO itself is also modeled as writes to memory invisible to
3782     //    the program.
3783     //
3784     // We rely on those assumptions here, and use the memory effects of the call
3785     // target as a proxy for checking that it always returns.
3786 
3787     // FIXME: This isn't aggressive enough; a call which only writes to a global
3788     // is guaranteed to return.
3789     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3790            match(I, m_Intrinsic<Intrinsic::assume>());
3791   }
3792 
3793   // Other instructions return normally.
3794   return true;
3795 }
3796 
3797 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3798                                                   const Loop *L) {
3799   // The loop header is guaranteed to be executed for every iteration.
3800   //
3801   // FIXME: Relax this constraint to cover all basic blocks that are
3802   // guaranteed to be executed at every iteration.
3803   if (I->getParent() != L->getHeader()) return false;
3804 
3805   for (const Instruction &LI : *L->getHeader()) {
3806     if (&LI == I) return true;
3807     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3808   }
3809   llvm_unreachable("Instruction not contained in its own parent basic block.");
3810 }
3811 
3812 bool llvm::propagatesFullPoison(const Instruction *I) {
3813   switch (I->getOpcode()) {
3814   case Instruction::Add:
3815   case Instruction::Sub:
3816   case Instruction::Xor:
3817   case Instruction::Trunc:
3818   case Instruction::BitCast:
3819   case Instruction::AddrSpaceCast:
3820   case Instruction::Mul:
3821   case Instruction::Shl:
3822   case Instruction::GetElementPtr:
3823     // These operations all propagate poison unconditionally. Note that poison
3824     // is not any particular value, so xor or subtraction of poison with
3825     // itself still yields poison, not zero.
3826     return true;
3827 
3828   case Instruction::AShr:
3829   case Instruction::SExt:
3830     // For these operations, one bit of the input is replicated across
3831     // multiple output bits. A replicated poison bit is still poison.
3832     return true;
3833 
3834   case Instruction::ICmp:
3835     // Comparing poison with any value yields poison.  This is why, for
3836     // instance, x s< (x +nsw 1) can be folded to true.
3837     return true;
3838 
3839   default:
3840     return false;
3841   }
3842 }
3843 
3844 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3845   switch (I->getOpcode()) {
3846     case Instruction::Store:
3847       return cast<StoreInst>(I)->getPointerOperand();
3848 
3849     case Instruction::Load:
3850       return cast<LoadInst>(I)->getPointerOperand();
3851 
3852     case Instruction::AtomicCmpXchg:
3853       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3854 
3855     case Instruction::AtomicRMW:
3856       return cast<AtomicRMWInst>(I)->getPointerOperand();
3857 
3858     case Instruction::UDiv:
3859     case Instruction::SDiv:
3860     case Instruction::URem:
3861     case Instruction::SRem:
3862       return I->getOperand(1);
3863 
3864     default:
3865       return nullptr;
3866   }
3867 }
3868 
3869 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
3870   // We currently only look for uses of poison values within the same basic
3871   // block, as that makes it easier to guarantee that the uses will be
3872   // executed given that PoisonI is executed.
3873   //
3874   // FIXME: Expand this to consider uses beyond the same basic block. To do
3875   // this, look out for the distinction between post-dominance and strong
3876   // post-dominance.
3877   const BasicBlock *BB = PoisonI->getParent();
3878 
3879   // Set of instructions that we have proved will yield poison if PoisonI
3880   // does.
3881   SmallSet<const Value *, 16> YieldsPoison;
3882   SmallSet<const BasicBlock *, 4> Visited;
3883   YieldsPoison.insert(PoisonI);
3884   Visited.insert(PoisonI->getParent());
3885 
3886   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
3887 
3888   unsigned Iter = 0;
3889   while (Iter++ < MaxDepth) {
3890     for (auto &I : make_range(Begin, End)) {
3891       if (&I != PoisonI) {
3892         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
3893         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
3894           return true;
3895         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
3896           return false;
3897       }
3898 
3899       // Mark poison that propagates from I through uses of I.
3900       if (YieldsPoison.count(&I)) {
3901         for (const User *User : I.users()) {
3902           const Instruction *UserI = cast<Instruction>(User);
3903           if (propagatesFullPoison(UserI))
3904             YieldsPoison.insert(User);
3905         }
3906       }
3907     }
3908 
3909     if (auto *NextBB = BB->getSingleSuccessor()) {
3910       if (Visited.insert(NextBB).second) {
3911         BB = NextBB;
3912         Begin = BB->getFirstNonPHI()->getIterator();
3913         End = BB->end();
3914         continue;
3915       }
3916     }
3917 
3918     break;
3919   }
3920   return false;
3921 }
3922 
3923 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
3924   if (FMF.noNaNs())
3925     return true;
3926 
3927   if (auto *C = dyn_cast<ConstantFP>(V))
3928     return !C->isNaN();
3929   return false;
3930 }
3931 
3932 static bool isKnownNonZero(const Value *V) {
3933   if (auto *C = dyn_cast<ConstantFP>(V))
3934     return !C->isZero();
3935   return false;
3936 }
3937 
/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  // Recognize variations of:
  // CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
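  //
  // For example (hypothetical IR), clamping %x to [10, 100] in the signed
  // domain may appear as:
  //   %hi  = icmp slt i32 %x, 100
  //   %min = select i1 %hi, i32 %x, i32 100     ; SMIN(%x, 100)
  //   %lo  = icmp slt i32 %x, 10
  //   %sel = select i1 %lo, i32 10, i32 %min    ; SMAX(SMIN(%x, 100), 10)
  // which is recognized by the first case below (C1 = 10, C2 = 100).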
  const APInt *C1;
  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
    const APInt *C2;

    // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
    if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
      return {SPF_SMAX, SPNB_NA, false};

    // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
    if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
      return {SPF_SMIN, SPNB_NA, false};

    // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
    if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
      return {SPF_UMAX, SPNB_NA, false};

    // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
    if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
      return {SPF_UMIN, SPNB_NA, false};
  }

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  return {SPF_UNKNOWN, SPNB_NA, false};
}

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS) {
  LHS = CmpLHS;
  RHS = CmpRHS;

  // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may
  // lead to inconsistent results between implementations.
  //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore we behave conservatively and only proceed if at least one of the
  // operands is known to not be zero, or if we don't care about signed zeroes.
  switch (Pred) {
  default: break;
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //     ordered comparison fails), which could be NaN or non-NaN.
  // so here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

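  // If the select arms are the compare operands in swapped order, swap the
  // compare operands (and flip the recorded NaN behavior and orderedness) so
  // that the "(cmp X, Y) ? X : Y" check below handles both forms.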
  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  const APInt *C1;
  if (match(CmpRHS, m_APInt(C1))) {
    if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
        (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {

      // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
      // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
      if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) {
        return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }

      // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
      // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
      if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) {
        return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }
    }
  }

  return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

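/// Helper for matchSelectPattern. V1 is expected to be a cast instruction.
/// If V2 is the same kind of cast from the same source type, return V2's
/// un-cast operand; if V2 is a constant that, for the given comparison, can
/// be converted into the cast's source type without losing information,
/// return that converted constant. Otherwise return nullptr. *CastOp is set
/// to V1's cast opcode.
///
/// For example (hypothetical IR), this lets the caller match
///   %cmp = icmp ult i8 %x, 100
///   %ext = zext i8 %x to i32
///   %sel = select i1 %cmp, i32 %ext, i32 100
/// as UMIN(%x, 100) on the original i8 operands.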
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp) {
  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early: an equality compare never describes a min/max/abs pattern.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS);
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS);
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS);
}

/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred,
                            const Value *LHS, const Value *RHS,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
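      // (e.g. X = 0b1000, C = 0b0011: the set bits are disjoint, so the OR
      // adds C without a carry, i.e. without unsigned wrap).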
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, AC, CxtI, DT);

        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true.  Otherwise, return None.
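///
/// For example, with Pred == ICMP_SLT, "x s< y" implies "x s< (y +nsw 1)"
/// is true, because x s<= x and y s<= (y +nsw 1) both hold.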
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS,
                      const Value *BRHS, const DataLayout &DL,
                      unsigned Depth, AssumptionCache *AC,
                      const Instruction *CxtI, const DominatorTree *DT) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
                        DT) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
                        DT) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is set
/// to true when the operands match but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false.  Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }
  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false.  Otherwise, return None if we can't infer anything.
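///
/// For example, "x u< 5" implies "x u< 10" is true: the exact range for
/// "x u< 5" is [0, 5), which lies entirely inside the allowed range [0, 10)
/// for "x u< 10", so the set difference computed below is empty.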
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool InvertAPred,
                                        unsigned Depth, AssumptionCache *AC,
                                        const Instruction *CxtI,
                                        const DominatorTree *DT) {
  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->getScalarType()->isIntegerTy(1));

  // LHS ==> RHS by definition
  if (!InvertAPred && LHS == RHS)
    return true;

  if (OpTy->isVectorTy())
    // TODO: extend the code below to handle vectors.
    return None;
  assert(OpTy->isIntegerTy(1) && "implied by above");

  ICmpInst::Predicate APred, BPred;
  Value *ALHS, *ARHS;
  Value *BLHS, *BRHS;

  if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
      !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
    return None;

  if (InvertAPred)
    APred = CmpInst::getInversePredicate(APred);

  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
                                 CxtI, DT);

  return None;
}