1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/Loads.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/Analysis/TargetLibraryInfo.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/DiagnosticInfo.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GetElementPtrTypeIterator.h"
47 #include "llvm/IR/GlobalAlias.h"
48 #include "llvm/IR/GlobalValue.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/LLVMContext.h"
56 #include "llvm/IR/Metadata.h"
57 #include "llvm/IR/Module.h"
58 #include "llvm/IR/Operator.h"
59 #include "llvm/IR/PatternMatch.h"
60 #include "llvm/IR/Type.h"
61 #include "llvm/IR/User.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/Compiler.h"
66 #include "llvm/Support/ErrorHandling.h"
67 #include "llvm/Support/KnownBits.h"
68 #include "llvm/Support/MathExtras.h"
69 #include <algorithm>
70 #include <array>
71 #include <cassert>
72 #include <cstdint>
73 #include <iterator>
74 #include <utility>
75 
76 using namespace llvm;
77 using namespace llvm::PatternMatch;
78 
79 const unsigned MaxDepth = 6;
80 
81 // Controls the number of uses of the value searched for possible
82 // dominating comparisons.
83 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
84                                               cl::Hidden, cl::init(20));
85 
86 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
87 /// returns the element type's bitwidth.
88 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
89   if (unsigned BitWidth = Ty->getScalarSizeInBits())
90     return BitWidth;
91 
92   return DL.getIndexTypeSizeInBits(Ty);
93 }
94 
95 namespace {
96 
97 // Simplifying using an assume can only be done in a particular control-flow
98 // context (the context instruction provides that context). If an assume and
99 // the context instruction are not in the same block then the DT helps in
100 // figuring out if we can use it.
101 struct Query {
102   const DataLayout &DL;
103   AssumptionCache *AC;
104   const Instruction *CxtI;
105   const DominatorTree *DT;
106 
107   // Unlike the other analyses, this may be a nullptr because not all clients
108   // provide it currently.
109   OptimizationRemarkEmitter *ORE;
110 
111   /// Set of assumptions that should be excluded from further queries.
112   /// This is because of the potential for mutual recursion to cause
113   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
114   /// classic case of this is assume(x = y), which will attempt to determine
115   /// bits in x from bits in y, which will attempt to determine bits in y from
116   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
117   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
118   /// (all of which can call computeKnownBits), and so on.
119   std::array<const Value *, MaxDepth> Excluded;
120 
121   unsigned NumExcluded = 0;
122 
123   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
124         const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
125       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}
126 
127   Query(const Query &Q, const Value *NewExcl)
128       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
129         NumExcluded(Q.NumExcluded) {
130     Excluded = Q.Excluded;
131     Excluded[NumExcluded++] = NewExcl;
132     assert(NumExcluded <= Excluded.size());
133   }
134 
135   bool isExcluded(const Value *Value) const {
136     if (NumExcluded == 0)
137       return false;
138     auto End = Excluded.begin() + NumExcluded;
139     return std::find(Excluded.begin(), End, Value) != End;
140   }
141 };
142 
143 } // end anonymous namespace
144 
145 // Given the provided Value and, potentially, a context instruction, return
146 // the preferred context instruction (if any).
147 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
148   // If we've been provided with a context instruction, then use that (provided
149   // it has been inserted).
150   if (CxtI && CxtI->getParent())
151     return CxtI;
152 
153   // If the value is really an already-inserted instruction, then use that.
154   CxtI = dyn_cast<Instruction>(V);
155   if (CxtI && CxtI->getParent())
156     return CxtI;
157 
158   return nullptr;
159 }
160 
161 static void computeKnownBits(const Value *V, KnownBits &Known,
162                              unsigned Depth, const Query &Q);
163 
164 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
165                             const DataLayout &DL, unsigned Depth,
166                             AssumptionCache *AC, const Instruction *CxtI,
167                             const DominatorTree *DT,
168                             OptimizationRemarkEmitter *ORE) {
169   ::computeKnownBits(V, Known, Depth,
170                      Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
171 }
172 
173 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
174                                   const Query &Q);
175 
176 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
177                                  unsigned Depth, AssumptionCache *AC,
178                                  const Instruction *CxtI,
179                                  const DominatorTree *DT,
180                                  OptimizationRemarkEmitter *ORE) {
181   return ::computeKnownBits(V, Depth,
182                             Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
183 }
184 
185 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
186                                const DataLayout &DL,
187                                AssumptionCache *AC, const Instruction *CxtI,
188                                const DominatorTree *DT) {
189   assert(LHS->getType() == RHS->getType() &&
190          "LHS and RHS should have the same type");
191   assert(LHS->getType()->isIntOrIntVectorTy() &&
192          "LHS and RHS should be integers");
193   // Look for an inverted mask: (X & ~M) op (Y & M).
194   Value *M;
195   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
196       match(RHS, m_c_And(m_Specific(M), m_Value())))
197     return true;
198   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
199       match(LHS, m_c_And(m_Specific(M), m_Value())))
200     return true;
201   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
202   KnownBits LHSKnown(IT->getBitWidth());
203   KnownBits RHSKnown(IT->getBitWidth());
204   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
205   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
206   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
207 }
208 
209 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
210   for (const User *U : CxtI->users()) {
211     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
212       if (IC->isEquality())
213         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
214           if (C->isNullValue())
215             continue;
216     return false;
217   }
218   return true;
219 }
220 
221 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
222                                    const Query &Q);
223 
224 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
225                                   bool OrZero,
226                                   unsigned Depth, AssumptionCache *AC,
227                                   const Instruction *CxtI,
228                                   const DominatorTree *DT) {
229   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
230                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
231 }
232 
233 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
234 
235 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
236                           AssumptionCache *AC, const Instruction *CxtI,
237                           const DominatorTree *DT) {
238   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
239 }
240 
241 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
242                               unsigned Depth,
243                               AssumptionCache *AC, const Instruction *CxtI,
244                               const DominatorTree *DT) {
245   KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
246   return Known.isNonNegative();
247 }
248 
249 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
250                            AssumptionCache *AC, const Instruction *CxtI,
251                            const DominatorTree *DT) {
252   if (auto *CI = dyn_cast<ConstantInt>(V))
253     return CI->getValue().isStrictlyPositive();
254 
255   // TODO: We'd doing two recursive queries here.  We should factor this such
256   // that only a single query is needed.
257   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
258     isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
259 }
260 
261 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
262                            AssumptionCache *AC, const Instruction *CxtI,
263                            const DominatorTree *DT) {
264   KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
265   return Known.isNegative();
266 }
267 
268 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
269 
270 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
271                            const DataLayout &DL,
272                            AssumptionCache *AC, const Instruction *CxtI,
273                            const DominatorTree *DT) {
274   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
275                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
276                                          DT));
277 }
278 
279 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
280                               const Query &Q);
281 
282 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
283                              const DataLayout &DL,
284                              unsigned Depth, AssumptionCache *AC,
285                              const Instruction *CxtI, const DominatorTree *DT) {
286   return ::MaskedValueIsZero(V, Mask, Depth,
287                              Query(DL, AC, safeCxtI(V, CxtI), DT));
288 }
289 
290 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
291                                    const Query &Q);
292 
293 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
294                                   unsigned Depth, AssumptionCache *AC,
295                                   const Instruction *CxtI,
296                                   const DominatorTree *DT) {
297   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
298 }
299 
300 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
301                                    bool NSW,
302                                    KnownBits &KnownOut, KnownBits &Known2,
303                                    unsigned Depth, const Query &Q) {
304   unsigned BitWidth = KnownOut.getBitWidth();
305 
306   // If an initial sequence of bits in the result is not needed, the
307   // corresponding bits in the operands are not needed.
308   KnownBits LHSKnown(BitWidth);
309   computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
310   computeKnownBits(Op1, Known2, Depth + 1, Q);
311 
312   KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
313 }
314 
315 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
316                                 KnownBits &Known, KnownBits &Known2,
317                                 unsigned Depth, const Query &Q) {
318   unsigned BitWidth = Known.getBitWidth();
319   computeKnownBits(Op1, Known, Depth + 1, Q);
320   computeKnownBits(Op0, Known2, Depth + 1, Q);
321 
322   bool isKnownNegative = false;
323   bool isKnownNonNegative = false;
324   // If the multiplication is known not to overflow, compute the sign bit.
325   if (NSW) {
326     if (Op0 == Op1) {
327       // The product of a number with itself is non-negative.
328       isKnownNonNegative = true;
329     } else {
330       bool isKnownNonNegativeOp1 = Known.isNonNegative();
331       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
332       bool isKnownNegativeOp1 = Known.isNegative();
333       bool isKnownNegativeOp0 = Known2.isNegative();
334       // The product of two numbers with the same sign is non-negative.
335       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
336         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
337       // The product of a negative number and a non-negative number is either
338       // negative or zero.
339       if (!isKnownNonNegative)
340         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
341                            isKnownNonZero(Op0, Depth, Q)) ||
342                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
343                            isKnownNonZero(Op1, Depth, Q));
344     }
345   }
346 
347   assert(!Known.hasConflict() && !Known2.hasConflict());
348   // Compute a conservative estimate for high known-0 bits.
349   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
350                              Known2.countMinLeadingZeros(),
351                              BitWidth) - BitWidth;
352   LeadZ = std::min(LeadZ, BitWidth);
353 
354   // The result of the bottom bits of an integer multiply can be
355   // inferred by looking at the bottom bits of both operands and
356   // multiplying them together.
357   // We can infer at least the minimum number of known trailing bits
358   // of both operands. Depending on number of trailing zeros, we can
359   // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
360   // a and b are divisible by m and n respectively.
361   // We then calculate how many of those bits are inferrable and set
362   // the output. For example, the i8 mul:
363   //  a = XXXX1100 (12)
364   //  b = XXXX1110 (14)
365   // We know the bottom 3 bits are zero since the first can be divided by
366   // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
367   // Applying the multiplication to the trimmed arguments gets:
368   //    XX11 (3)
369   //    X111 (7)
370   // -------
371   //    XX11
372   //   XX11
373   //  XX11
374   // XX11
375   // -------
376   // XXXXX01
377   // Which allows us to infer the 2 LSBs. Since we're multiplying the result
378   // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
379   // The proof for this can be described as:
380   // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
381   //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
382   //                    umin(countTrailingZeros(C2), C6) +
383   //                    umin(C5 - umin(countTrailingZeros(C1), C5),
384   //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
385   // %aa = shl i8 %a, C5
386   // %bb = shl i8 %b, C6
387   // %aaa = or i8 %aa, C1
388   // %bbb = or i8 %bb, C2
389   // %mul = mul i8 %aaa, %bbb
390   // %mask = and i8 %mul, C7
391   //   =>
392   // %mask = i8 ((C1*C2)&C7)
393   // Where C5, C6 describe the known bits of %a, %b
394   // C1, C2 describe the known bottom bits of %a, %b.
395   // C7 describes the mask of the known bits of the result.
396   APInt Bottom0 = Known.One;
397   APInt Bottom1 = Known2.One;
398 
399   // How many times we'd be able to divide each argument by 2 (shr by 1).
400   // This gives us the number of trailing zeros on the multiplication result.
401   unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
402   unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
403   unsigned TrailZero0 = Known.countMinTrailingZeros();
404   unsigned TrailZero1 = Known2.countMinTrailingZeros();
405   unsigned TrailZ = TrailZero0 + TrailZero1;
406 
407   // Figure out the fewest known-bits operand.
408   unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
409                                       TrailBitsKnown1 - TrailZero1);
410   unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
411 
412   APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
413                       Bottom1.getLoBits(TrailBitsKnown1);
414 
415   Known.resetAll();
416   Known.Zero.setHighBits(LeadZ);
417   Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
418   Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
419 
420   // Only make use of no-wrap flags if we failed to compute the sign bit
421   // directly.  This matters if the multiplication always overflows, in
422   // which case we prefer to follow the result of the direct computation,
423   // though as the program is invoking undefined behaviour we can choose
424   // whatever we like here.
425   if (isKnownNonNegative && !Known.isNegative())
426     Known.makeNonNegative();
427   else if (isKnownNegative && !Known.isNonNegative())
428     Known.makeNegative();
429 }
430 
431 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
432                                              KnownBits &Known) {
433   unsigned BitWidth = Known.getBitWidth();
434   unsigned NumRanges = Ranges.getNumOperands() / 2;
435   assert(NumRanges >= 1);
436 
437   Known.Zero.setAllBits();
438   Known.One.setAllBits();
439 
440   for (unsigned i = 0; i < NumRanges; ++i) {
441     ConstantInt *Lower =
442         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
443     ConstantInt *Upper =
444         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
445     ConstantRange Range(Lower->getValue(), Upper->getValue());
446 
447     // The first CommonPrefixBits of all values in Range are equal.
448     unsigned CommonPrefixBits =
449         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
450 
451     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
452     Known.One &= Range.getUnsignedMax() & Mask;
453     Known.Zero &= ~Range.getUnsignedMax() & Mask;
454   }
455 }
456 
457 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
458   SmallVector<const Value *, 16> WorkSet(1, I);
459   SmallPtrSet<const Value *, 32> Visited;
460   SmallPtrSet<const Value *, 16> EphValues;
461 
462   // The instruction defining an assumption's condition itself is always
463   // considered ephemeral to that assumption (even if it has other
464   // non-ephemeral users). See r246696's test case for an example.
465   if (is_contained(I->operands(), E))
466     return true;
467 
468   while (!WorkSet.empty()) {
469     const Value *V = WorkSet.pop_back_val();
470     if (!Visited.insert(V).second)
471       continue;
472 
473     // If all uses of this value are ephemeral, then so is this value.
474     if (llvm::all_of(V->users(), [&](const User *U) {
475                                    return EphValues.count(U);
476                                  })) {
477       if (V == E)
478         return true;
479 
480       if (V == I || isSafeToSpeculativelyExecute(V)) {
481        EphValues.insert(V);
482        if (const User *U = dyn_cast<User>(V))
483          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
484               J != JE; ++J)
485            WorkSet.push_back(*J);
486       }
487     }
488   }
489 
490   return false;
491 }
492 
493 // Is this an intrinsic that cannot be speculated but also cannot trap?
494 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
495   if (const CallInst *CI = dyn_cast<CallInst>(I))
496     if (Function *F = CI->getCalledFunction())
497       switch (F->getIntrinsicID()) {
498       default: break;
499       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
500       case Intrinsic::assume:
501       case Intrinsic::sideeffect:
502       case Intrinsic::dbg_declare:
503       case Intrinsic::dbg_value:
504       case Intrinsic::invariant_start:
505       case Intrinsic::invariant_end:
506       case Intrinsic::lifetime_start:
507       case Intrinsic::lifetime_end:
508       case Intrinsic::objectsize:
509       case Intrinsic::ptr_annotation:
510       case Intrinsic::var_annotation:
511         return true;
512       }
513 
514   return false;
515 }
516 
517 bool llvm::isValidAssumeForContext(const Instruction *Inv,
518                                    const Instruction *CxtI,
519                                    const DominatorTree *DT) {
520   // There are two restrictions on the use of an assume:
521   //  1. The assume must dominate the context (or the control flow must
522   //     reach the assume whenever it reaches the context).
523   //  2. The context must not be in the assume's set of ephemeral values
524   //     (otherwise we will use the assume to prove that the condition
525   //     feeding the assume is trivially true, thus causing the removal of
526   //     the assume).
527 
528   if (DT) {
529     if (DT->dominates(Inv, CxtI))
530       return true;
531   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
532     // We don't have a DT, but this trivially dominates.
533     return true;
534   }
535 
536   // With or without a DT, the only remaining case we will check is if the
537   // instructions are in the same BB.  Give up if that is not the case.
538   if (Inv->getParent() != CxtI->getParent())
539     return false;
540 
541   // If we have a dom tree, then we now know that the assume doesn't dominate
542   // the other instruction.  If we don't have a dom tree then we can check if
543   // the assume is first in the BB.
544   if (!DT) {
545     // Search forward from the assume until we reach the context (or the end
546     // of the block); the common case is that the assume will come first.
547     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
548          IE = Inv->getParent()->end(); I != IE; ++I)
549       if (&*I == CxtI)
550         return true;
551   }
552 
553   // The context comes first, but they're both in the same block. Make sure
554   // there is nothing in between that might interrupt the control flow.
555   for (BasicBlock::const_iterator I =
556          std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
557        I != IE; ++I)
558     if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
559       return false;
560 
561   return !isEphemeralValueOf(Inv, CxtI);
562 }
563 
564 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
565                                        unsigned Depth, const Query &Q) {
566   // Use of assumptions is context-sensitive. If we don't have a context, we
567   // cannot use them!
568   if (!Q.AC || !Q.CxtI)
569     return;
570 
571   unsigned BitWidth = Known.getBitWidth();
572 
573   // Note that the patterns below need to be kept in sync with the code
574   // in AssumptionCache::updateAffectedValues.
575 
576   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
577     if (!AssumeVH)
578       continue;
579     CallInst *I = cast<CallInst>(AssumeVH);
580     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
581            "Got assumption for the wrong function!");
582     if (Q.isExcluded(I))
583       continue;
584 
585     // Warning: This loop can end up being somewhat performance sensitive.
586     // We're running this loop for once for each value queried resulting in a
587     // runtime of ~O(#assumes * #values).
588 
589     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
590            "must be an assume intrinsic");
591 
592     Value *Arg = I->getArgOperand(0);
593 
594     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
595       assert(BitWidth == 1 && "assume operand is not i1?");
596       Known.setAllOnes();
597       return;
598     }
599     if (match(Arg, m_Not(m_Specific(V))) &&
600         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
601       assert(BitWidth == 1 && "assume operand is not i1?");
602       Known.setAllZero();
603       return;
604     }
605 
606     // The remaining tests are all recursive, so bail out if we hit the limit.
607     if (Depth == MaxDepth)
608       continue;
609 
610     Value *A, *B;
611     auto m_V = m_CombineOr(m_Specific(V),
612                            m_CombineOr(m_PtrToInt(m_Specific(V)),
613                            m_BitCast(m_Specific(V))));
614 
615     CmpInst::Predicate Pred;
616     uint64_t C;
617     // assume(v = a)
618     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
619         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
620       KnownBits RHSKnown(BitWidth);
621       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
622       Known.Zero |= RHSKnown.Zero;
623       Known.One  |= RHSKnown.One;
624     // assume(v & b = a)
625     } else if (match(Arg,
626                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
627                Pred == ICmpInst::ICMP_EQ &&
628                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
629       KnownBits RHSKnown(BitWidth);
630       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
631       KnownBits MaskKnown(BitWidth);
632       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
633 
634       // For those bits in the mask that are known to be one, we can propagate
635       // known bits from the RHS to V.
636       Known.Zero |= RHSKnown.Zero & MaskKnown.One;
637       Known.One  |= RHSKnown.One  & MaskKnown.One;
638     // assume(~(v & b) = a)
639     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
640                                    m_Value(A))) &&
641                Pred == ICmpInst::ICMP_EQ &&
642                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
643       KnownBits RHSKnown(BitWidth);
644       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
645       KnownBits MaskKnown(BitWidth);
646       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
647 
648       // For those bits in the mask that are known to be one, we can propagate
649       // inverted known bits from the RHS to V.
650       Known.Zero |= RHSKnown.One  & MaskKnown.One;
651       Known.One  |= RHSKnown.Zero & MaskKnown.One;
652     // assume(v | b = a)
653     } else if (match(Arg,
654                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
655                Pred == ICmpInst::ICMP_EQ &&
656                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
657       KnownBits RHSKnown(BitWidth);
658       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
659       KnownBits BKnown(BitWidth);
660       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
661 
662       // For those bits in B that are known to be zero, we can propagate known
663       // bits from the RHS to V.
664       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
665       Known.One  |= RHSKnown.One  & BKnown.Zero;
666     // assume(~(v | b) = a)
667     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
668                                    m_Value(A))) &&
669                Pred == ICmpInst::ICMP_EQ &&
670                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
671       KnownBits RHSKnown(BitWidth);
672       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
673       KnownBits BKnown(BitWidth);
674       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
675 
676       // For those bits in B that are known to be zero, we can propagate
677       // inverted known bits from the RHS to V.
678       Known.Zero |= RHSKnown.One  & BKnown.Zero;
679       Known.One  |= RHSKnown.Zero & BKnown.Zero;
680     // assume(v ^ b = a)
681     } else if (match(Arg,
682                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
683                Pred == ICmpInst::ICMP_EQ &&
684                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
685       KnownBits RHSKnown(BitWidth);
686       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
687       KnownBits BKnown(BitWidth);
688       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
689 
690       // For those bits in B that are known to be zero, we can propagate known
691       // bits from the RHS to V. For those bits in B that are known to be one,
692       // we can propagate inverted known bits from the RHS to V.
693       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
694       Known.One  |= RHSKnown.One  & BKnown.Zero;
695       Known.Zero |= RHSKnown.One  & BKnown.One;
696       Known.One  |= RHSKnown.Zero & BKnown.One;
697     // assume(~(v ^ b) = a)
698     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
699                                    m_Value(A))) &&
700                Pred == ICmpInst::ICMP_EQ &&
701                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
702       KnownBits RHSKnown(BitWidth);
703       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
704       KnownBits BKnown(BitWidth);
705       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
706 
707       // For those bits in B that are known to be zero, we can propagate
708       // inverted known bits from the RHS to V. For those bits in B that are
709       // known to be one, we can propagate known bits from the RHS to V.
710       Known.Zero |= RHSKnown.One  & BKnown.Zero;
711       Known.One  |= RHSKnown.Zero & BKnown.Zero;
712       Known.Zero |= RHSKnown.Zero & BKnown.One;
713       Known.One  |= RHSKnown.One  & BKnown.One;
714     // assume(v << c = a)
715     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
716                                    m_Value(A))) &&
717                Pred == ICmpInst::ICMP_EQ &&
718                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
719                C < BitWidth) {
720       KnownBits RHSKnown(BitWidth);
721       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
722       // For those bits in RHS that are known, we can propagate them to known
723       // bits in V shifted to the right by C.
724       RHSKnown.Zero.lshrInPlace(C);
725       Known.Zero |= RHSKnown.Zero;
726       RHSKnown.One.lshrInPlace(C);
727       Known.One  |= RHSKnown.One;
728     // assume(~(v << c) = a)
729     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
730                                    m_Value(A))) &&
731                Pred == ICmpInst::ICMP_EQ &&
732                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
733                C < BitWidth) {
734       KnownBits RHSKnown(BitWidth);
735       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
736       // For those bits in RHS that are known, we can propagate them inverted
737       // to known bits in V shifted to the right by C.
738       RHSKnown.One.lshrInPlace(C);
739       Known.Zero |= RHSKnown.One;
740       RHSKnown.Zero.lshrInPlace(C);
741       Known.One  |= RHSKnown.Zero;
742     // assume(v >> c = a)
743     } else if (match(Arg,
744                      m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
745                               m_Value(A))) &&
746                Pred == ICmpInst::ICMP_EQ &&
747                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
748                C < BitWidth) {
749       KnownBits RHSKnown(BitWidth);
750       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
751       // For those bits in RHS that are known, we can propagate them to known
752       // bits in V shifted to the right by C.
753       Known.Zero |= RHSKnown.Zero << C;
754       Known.One  |= RHSKnown.One  << C;
755     // assume(~(v >> c) = a)
756     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
757                                    m_Value(A))) &&
758                Pred == ICmpInst::ICMP_EQ &&
759                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
760                C < BitWidth) {
761       KnownBits RHSKnown(BitWidth);
762       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
763       // For those bits in RHS that are known, we can propagate them inverted
764       // to known bits in V shifted to the right by C.
765       Known.Zero |= RHSKnown.One  << C;
766       Known.One  |= RHSKnown.Zero << C;
767     // assume(v >=_s c) where c is non-negative
768     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
769                Pred == ICmpInst::ICMP_SGE &&
770                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
771       KnownBits RHSKnown(BitWidth);
772       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
773 
774       if (RHSKnown.isNonNegative()) {
775         // We know that the sign bit is zero.
776         Known.makeNonNegative();
777       }
778     // assume(v >_s c) where c is at least -1.
779     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
780                Pred == ICmpInst::ICMP_SGT &&
781                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
782       KnownBits RHSKnown(BitWidth);
783       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
784 
785       if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
786         // We know that the sign bit is zero.
787         Known.makeNonNegative();
788       }
789     // assume(v <=_s c) where c is negative
790     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
791                Pred == ICmpInst::ICMP_SLE &&
792                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
793       KnownBits RHSKnown(BitWidth);
794       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
795 
796       if (RHSKnown.isNegative()) {
797         // We know that the sign bit is one.
798         Known.makeNegative();
799       }
800     // assume(v <_s c) where c is non-positive
801     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
802                Pred == ICmpInst::ICMP_SLT &&
803                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
804       KnownBits RHSKnown(BitWidth);
805       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
806 
807       if (RHSKnown.isZero() || RHSKnown.isNegative()) {
808         // We know that the sign bit is one.
809         Known.makeNegative();
810       }
811     // assume(v <=_u c)
812     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
813                Pred == ICmpInst::ICMP_ULE &&
814                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
815       KnownBits RHSKnown(BitWidth);
816       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
817 
818       // Whatever high bits in c are zero are known to be zero.
819       Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
820       // assume(v <_u c)
821     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
822                Pred == ICmpInst::ICMP_ULT &&
823                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
824       KnownBits RHSKnown(BitWidth);
825       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
826 
827       // If the RHS is known zero, then this assumption must be wrong (nothing
828       // is unsigned less than zero). Signal a conflict and get out of here.
829       if (RHSKnown.isZero()) {
830         Known.Zero.setAllBits();
831         Known.One.setAllBits();
832         break;
833       }
834 
835       // Whatever high bits in c are zero are known to be zero (if c is a power
836       // of 2, then one more).
837       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
838         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
839       else
840         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
841     }
842   }
843 
844   // If assumptions conflict with each other or previous known bits, then we
845   // have a logical fallacy. It's possible that the assumption is not reachable,
846   // so this isn't a real bug. On the other hand, the program may have undefined
847   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
848   // clear out the known bits, try to warn the user, and hope for the best.
849   if (Known.Zero.intersects(Known.One)) {
850     Known.resetAll();
851 
852     if (Q.ORE)
853       Q.ORE->emit([&]() {
854         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
855         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
856                                           CxtI)
857                << "Detected conflicting code assumptions. Program may "
858                   "have undefined behavior, or compiler may have "
859                   "internal error.";
860       });
861   }
862 }
863 
864 /// Compute known bits from a shift operator, including those with a
865 /// non-constant shift amount. Known is the output of this function. Known2 is a
866 /// pre-allocated temporary with the same bit width as Known. KZF and KOF are
867 /// operator-specific functions that, given the known-zero or known-one bits
868 /// respectively, and a shift amount, compute the implied known-zero or
869 /// known-one bits of the shift operator's result respectively for that shift
870 /// amount. The results from calling KZF and KOF are conservatively combined for
871 /// all permitted shift amounts.
872 static void computeKnownBitsFromShiftOperator(
873     const Operator *I, KnownBits &Known, KnownBits &Known2,
874     unsigned Depth, const Query &Q,
875     function_ref<APInt(const APInt &, unsigned)> KZF,
876     function_ref<APInt(const APInt &, unsigned)> KOF) {
877   unsigned BitWidth = Known.getBitWidth();
878 
879   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
880     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
881 
882     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
883     Known.Zero = KZF(Known.Zero, ShiftAmt);
884     Known.One  = KOF(Known.One, ShiftAmt);
885     // If the known bits conflict, this must be an overflowing left shift, so
886     // the shift result is poison. We can return anything we want. Choose 0 for
887     // the best folding opportunity.
888     if (Known.hasConflict())
889       Known.setAllZero();
890 
891     return;
892   }
893 
894   computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
895 
896   // If the shift amount could be greater than or equal to the bit-width of the
897   // LHS, the value could be poison, but bail out because the check below is
898   // expensive. TODO: Should we just carry on?
899   if ((~Known.Zero).uge(BitWidth)) {
900     Known.resetAll();
901     return;
902   }
903 
904   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
905   // BitWidth > 64 and any upper bits are known, we'll end up returning the
906   // limit value (which implies all bits are known).
907   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
908   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
909 
910   // It would be more-clearly correct to use the two temporaries for this
911   // calculation. Reusing the APInts here to prevent unnecessary allocations.
912   Known.resetAll();
913 
914   // If we know the shifter operand is nonzero, we can sometimes infer more
915   // known bits. However this is expensive to compute, so be lazy about it and
916   // only compute it when absolutely necessary.
917   Optional<bool> ShifterOperandIsNonZero;
918 
919   // Early exit if we can't constrain any well-defined shift amount.
920   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
921       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
922     ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
923     if (!*ShifterOperandIsNonZero)
924       return;
925   }
926 
927   computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
928 
929   Known.Zero.setAllBits();
930   Known.One.setAllBits();
931   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
932     // Combine the shifted known input bits only for those shift amounts
933     // compatible with its known constraints.
934     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
935       continue;
936     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
937       continue;
938     // If we know the shifter is nonzero, we may be able to infer more known
939     // bits. This check is sunk down as far as possible to avoid the expensive
940     // call to isKnownNonZero if the cheaper checks above fail.
941     if (ShiftAmt == 0) {
942       if (!ShifterOperandIsNonZero.hasValue())
943         ShifterOperandIsNonZero =
944             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
945       if (*ShifterOperandIsNonZero)
946         continue;
947     }
948 
949     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
950     Known.One  &= KOF(Known2.One, ShiftAmt);
951   }
952 
953   // If the known bits conflict, the result is poison. Return a 0 and hope the
954   // caller can further optimize that.
955   if (Known.hasConflict())
956     Known.setAllZero();
957 }
958 
959 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
960                                          unsigned Depth, const Query &Q) {
961   unsigned BitWidth = Known.getBitWidth();
962 
963   KnownBits Known2(Known);
964   switch (I->getOpcode()) {
965   default: break;
966   case Instruction::Load:
967     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
968       computeKnownBitsFromRangeMetadata(*MD, Known);
969     break;
970   case Instruction::And: {
971     // If either the LHS or the RHS are Zero, the result is zero.
972     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
973     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
974 
975     // Output known-1 bits are only known if set in both the LHS & RHS.
976     Known.One &= Known2.One;
977     // Output known-0 are known to be clear if zero in either the LHS | RHS.
978     Known.Zero |= Known2.Zero;
979 
980     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
981     // here we handle the more general case of adding any odd number by
982     // matching the form add(x, add(x, y)) where y is odd.
983     // TODO: This could be generalized to clearing any bit set in y where the
984     // following bit is known to be unset in y.
985     Value *X = nullptr, *Y = nullptr;
986     if (!Known.Zero[0] && !Known.One[0] &&
987         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
988       Known2.resetAll();
989       computeKnownBits(Y, Known2, Depth + 1, Q);
990       if (Known2.countMinTrailingOnes() > 0)
991         Known.Zero.setBit(0);
992     }
993     break;
994   }
995   case Instruction::Or:
996     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
997     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
998 
999     // Output known-0 bits are only known if clear in both the LHS & RHS.
1000     Known.Zero &= Known2.Zero;
1001     // Output known-1 are known to be set if set in either the LHS | RHS.
1002     Known.One |= Known2.One;
1003     break;
1004   case Instruction::Xor: {
1005     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1006     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1007 
1008     // Output known-0 bits are known if clear or set in both the LHS & RHS.
1009     APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1010     // Output known-1 are known to be set if set in only one of the LHS, RHS.
1011     Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1012     Known.Zero = std::move(KnownZeroOut);
1013     break;
1014   }
1015   case Instruction::Mul: {
1016     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1017     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
1018                         Known2, Depth, Q);
1019     break;
1020   }
1021   case Instruction::UDiv: {
1022     // For the purposes of computing leading zeros we can conservatively
1023     // treat a udiv as a logical right shift by the power of 2 known to
1024     // be less than the denominator.
1025     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1026     unsigned LeadZ = Known2.countMinLeadingZeros();
1027 
1028     Known2.resetAll();
1029     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1030     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1031     if (RHSMaxLeadingZeros != BitWidth)
1032       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1033 
1034     Known.Zero.setHighBits(LeadZ);
1035     break;
1036   }
1037   case Instruction::Select: {
1038     const Value *LHS, *RHS;
1039     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1040     if (SelectPatternResult::isMinOrMax(SPF)) {
1041       computeKnownBits(RHS, Known, Depth + 1, Q);
1042       computeKnownBits(LHS, Known2, Depth + 1, Q);
1043     } else {
1044       computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1045       computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1046     }
1047 
1048     unsigned MaxHighOnes = 0;
1049     unsigned MaxHighZeros = 0;
1050     if (SPF == SPF_SMAX) {
1051       // If both sides are negative, the result is negative.
1052       if (Known.isNegative() && Known2.isNegative())
1053         // We can derive a lower bound on the result by taking the max of the
1054         // leading one bits.
1055         MaxHighOnes =
1056             std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1057       // If either side is non-negative, the result is non-negative.
1058       else if (Known.isNonNegative() || Known2.isNonNegative())
1059         MaxHighZeros = 1;
1060     } else if (SPF == SPF_SMIN) {
1061       // If both sides are non-negative, the result is non-negative.
1062       if (Known.isNonNegative() && Known2.isNonNegative())
1063         // We can derive an upper bound on the result by taking the max of the
1064         // leading zero bits.
1065         MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1066                                 Known2.countMinLeadingZeros());
1067       // If either side is negative, the result is negative.
1068       else if (Known.isNegative() || Known2.isNegative())
1069         MaxHighOnes = 1;
1070     } else if (SPF == SPF_UMAX) {
1071       // We can derive a lower bound on the result by taking the max of the
1072       // leading one bits.
1073       MaxHighOnes =
1074           std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1075     } else if (SPF == SPF_UMIN) {
1076       // We can derive an upper bound on the result by taking the max of the
1077       // leading zero bits.
1078       MaxHighZeros =
1079           std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1080     }
1081 
1082     // Only known if known in both the LHS and RHS.
1083     Known.One &= Known2.One;
1084     Known.Zero &= Known2.Zero;
1085     if (MaxHighOnes > 0)
1086       Known.One.setHighBits(MaxHighOnes);
1087     if (MaxHighZeros > 0)
1088       Known.Zero.setHighBits(MaxHighZeros);
1089     break;
1090   }
1091   case Instruction::FPTrunc:
1092   case Instruction::FPExt:
1093   case Instruction::FPToUI:
1094   case Instruction::FPToSI:
1095   case Instruction::SIToFP:
1096   case Instruction::UIToFP:
1097     break; // Can't work with floating point.
1098   case Instruction::PtrToInt:
1099   case Instruction::IntToPtr:
1100     // Fall through and handle them the same as zext/trunc.
1101     LLVM_FALLTHROUGH;
1102   case Instruction::ZExt:
1103   case Instruction::Trunc: {
1104     Type *SrcTy = I->getOperand(0)->getType();
1105 
1106     unsigned SrcBitWidth;
1107     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1108     // which fall through here.
1109     Type *ScalarTy = SrcTy->getScalarType();
1110     SrcBitWidth = ScalarTy->isPointerTy() ?
1111       Q.DL.getIndexTypeSizeInBits(ScalarTy) :
1112       Q.DL.getTypeSizeInBits(ScalarTy);
1113 
1114     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1115     Known = Known.zextOrTrunc(SrcBitWidth);
1116     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1117     Known = Known.zextOrTrunc(BitWidth);
1118     // Any top bits are known to be zero.
1119     if (BitWidth > SrcBitWidth)
1120       Known.Zero.setBitsFrom(SrcBitWidth);
1121     break;
1122   }
1123   case Instruction::BitCast: {
1124     Type *SrcTy = I->getOperand(0)->getType();
1125     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1126         // TODO: For now, not handling conversions like:
1127         // (bitcast i64 %x to <2 x i32>)
1128         !I->getType()->isVectorTy()) {
1129       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1130       break;
1131     }
1132     break;
1133   }
1134   case Instruction::SExt: {
1135     // Compute the bits in the result that are not present in the input.
1136     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1137 
1138     Known = Known.trunc(SrcBitWidth);
1139     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1140     // If the sign bit of the input is known set or clear, then we know the
1141     // top bits of the result.
1142     Known = Known.sext(BitWidth);
1143     break;
1144   }
1145   case Instruction::Shl: {
1146     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1147     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1148     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1149       APInt KZResult = KnownZero << ShiftAmt;
1150       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1151       // If this shift has "nsw" keyword, then the result is either a poison
1152       // value or has the same sign bit as the first operand.
1153       if (NSW && KnownZero.isSignBitSet())
1154         KZResult.setSignBit();
1155       return KZResult;
1156     };
1157 
1158     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1159       APInt KOResult = KnownOne << ShiftAmt;
1160       if (NSW && KnownOne.isSignBitSet())
1161         KOResult.setSignBit();
1162       return KOResult;
1163     };
1164 
1165     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1166     break;
1167   }
1168   case Instruction::LShr: {
1169     // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1170     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1171       APInt KZResult = KnownZero.lshr(ShiftAmt);
1172       // High bits known zero.
1173       KZResult.setHighBits(ShiftAmt);
1174       return KZResult;
1175     };
1176 
1177     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1178       return KnownOne.lshr(ShiftAmt);
1179     };
1180 
1181     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1182     break;
1183   }
1184   case Instruction::AShr: {
1185     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1186     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1187       return KnownZero.ashr(ShiftAmt);
1188     };
1189 
1190     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1191       return KnownOne.ashr(ShiftAmt);
1192     };
1193 
1194     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1195     break;
1196   }
1197   case Instruction::Sub: {
1198     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1199     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1200                            Known, Known2, Depth, Q);
1201     break;
1202   }
1203   case Instruction::Add: {
1204     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1205     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1206                            Known, Known2, Depth, Q);
1207     break;
1208   }
1209   case Instruction::SRem:
1210     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1211       APInt RA = Rem->getValue().abs();
1212       if (RA.isPowerOf2()) {
1213         APInt LowBits = RA - 1;
1214         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1215 
1216         // The low bits of the first operand are unchanged by the srem.
1217         Known.Zero = Known2.Zero & LowBits;
1218         Known.One = Known2.One & LowBits;
1219 
1220         // If the first operand is non-negative or has all low bits zero, then
1221         // the upper bits are all zero.
1222         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1223           Known.Zero |= ~LowBits;
1224 
1225         // If the first operand is negative and not all low bits are zero, then
1226         // the upper bits are all one.
1227         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1228           Known.One |= ~LowBits;
1229 
1230         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1231         break;
1232       }
1233     }
1234 
1235     // The sign bit is the LHS's sign bit, except when the result of the
1236     // remainder is zero.
1237     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1238     // If it's known zero, our sign bit is also zero.
1239     if (Known2.isNonNegative())
1240       Known.makeNonNegative();
1241 
1242     break;
1243   case Instruction::URem: {
1244     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1245       const APInt &RA = Rem->getValue();
1246       if (RA.isPowerOf2()) {
1247         APInt LowBits = (RA - 1);
1248         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1249         Known.Zero |= ~LowBits;
1250         Known.One &= LowBits;
1251         break;
1252       }
1253     }
1254 
1255     // Since the result is less than or equal to either operand, any leading
1256     // zero bits in either operand must also exist in the result.
1257     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1258     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1259 
1260     unsigned Leaders =
1261         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1262     Known.resetAll();
1263     Known.Zero.setHighBits(Leaders);
1264     break;
1265   }
1266 
1267   case Instruction::Alloca: {
1268     const AllocaInst *AI = cast<AllocaInst>(I);
1269     unsigned Align = AI->getAlignment();
1270     if (Align == 0)
1271       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1272 
1273     if (Align > 0)
1274       Known.Zero.setLowBits(countTrailingZeros(Align));
1275     break;
1276   }
1277   case Instruction::GetElementPtr: {
1278     // Analyze all of the subscripts of this getelementptr instruction
1279     // to determine if we can prove known low zero bits.
1280     KnownBits LocalKnown(BitWidth);
1281     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1282     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1283 
1284     gep_type_iterator GTI = gep_type_begin(I);
1285     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1286       Value *Index = I->getOperand(i);
1287       if (StructType *STy = GTI.getStructTypeOrNull()) {
1288         // Handle struct member offset arithmetic.
1289 
1290         // Handle case when index is vector zeroinitializer
1291         Constant *CIndex = cast<Constant>(Index);
1292         if (CIndex->isZeroValue())
1293           continue;
1294 
1295         if (CIndex->getType()->isVectorTy())
1296           Index = CIndex->getSplatValue();
1297 
1298         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1299         const StructLayout *SL = Q.DL.getStructLayout(STy);
1300         uint64_t Offset = SL->getElementOffset(Idx);
1301         TrailZ = std::min<unsigned>(TrailZ,
1302                                     countTrailingZeros(Offset));
1303       } else {
1304         // Handle array index arithmetic.
1305         Type *IndexedTy = GTI.getIndexedType();
1306         if (!IndexedTy->isSized()) {
1307           TrailZ = 0;
1308           break;
1309         }
1310         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1311         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1312         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1313         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1314         TrailZ = std::min(TrailZ,
1315                           unsigned(countTrailingZeros(TypeSize) +
1316                                    LocalKnown.countMinTrailingZeros()));
1317       }
1318     }
1319 
1320     Known.Zero.setLowBits(TrailZ);
1321     break;
1322   }
1323   case Instruction::PHI: {
1324     const PHINode *P = cast<PHINode>(I);
1325     // Handle the case of a simple two-predecessor recurrence PHI.
1326     // There's a lot more that could theoretically be done here, but
1327     // this is sufficient to catch some interesting cases.
1328     if (P->getNumIncomingValues() == 2) {
1329       for (unsigned i = 0; i != 2; ++i) {
1330         Value *L = P->getIncomingValue(i);
1331         Value *R = P->getIncomingValue(!i);
1332         Operator *LU = dyn_cast<Operator>(L);
1333         if (!LU)
1334           continue;
1335         unsigned Opcode = LU->getOpcode();
1336         // Check for operations that have the property that if
1337         // both their operands have low zero bits, the result
1338         // will have low zero bits.
1339         if (Opcode == Instruction::Add ||
1340             Opcode == Instruction::Sub ||
1341             Opcode == Instruction::And ||
1342             Opcode == Instruction::Or ||
1343             Opcode == Instruction::Mul) {
1344           Value *LL = LU->getOperand(0);
1345           Value *LR = LU->getOperand(1);
1346           // Find a recurrence.
1347           if (LL == I)
1348             L = LR;
1349           else if (LR == I)
1350             L = LL;
1351           else
1352             break;
1353           // Ok, we have a PHI of the form L op= R. Check for low
1354           // zero bits.
1355           computeKnownBits(R, Known2, Depth + 1, Q);
1356 
1357           // We need to take the minimum number of known bits
1358           KnownBits Known3(Known);
1359           computeKnownBits(L, Known3, Depth + 1, Q);
1360 
1361           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1362                                          Known3.countMinTrailingZeros()));
1363 
1364           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1365           if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
1366             // If initial value of recurrence is nonnegative, and we are adding
1367             // a nonnegative number with nsw, the result can only be nonnegative
1368             // or poison value regardless of the number of times we execute the
1369             // add in phi recurrence. If initial value is negative and we are
1370             // adding a negative number with nsw, the result can only be
1371             // negative or poison value. Similar arguments apply to sub and mul.
1372             //
1373             // (add non-negative, non-negative) --> non-negative
1374             // (add negative, negative) --> negative
1375             if (Opcode == Instruction::Add) {
1376               if (Known2.isNonNegative() && Known3.isNonNegative())
1377                 Known.makeNonNegative();
1378               else if (Known2.isNegative() && Known3.isNegative())
1379                 Known.makeNegative();
1380             }
1381 
1382             // (sub nsw non-negative, negative) --> non-negative
1383             // (sub nsw negative, non-negative) --> negative
1384             else if (Opcode == Instruction::Sub && LL == I) {
1385               if (Known2.isNonNegative() && Known3.isNegative())
1386                 Known.makeNonNegative();
1387               else if (Known2.isNegative() && Known3.isNonNegative())
1388                 Known.makeNegative();
1389             }
1390 
1391             // (mul nsw non-negative, non-negative) --> non-negative
1392             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1393                      Known3.isNonNegative())
1394               Known.makeNonNegative();
1395           }
1396 
1397           break;
1398         }
1399       }
1400     }
1401 
1402     // Unreachable blocks may have zero-operand PHI nodes.
1403     if (P->getNumIncomingValues() == 0)
1404       break;
1405 
1406     // Otherwise take the unions of the known bit sets of the operands,
1407     // taking conservative care to avoid excessive recursion.
1408     if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1409       // Skip if every incoming value references to ourself.
1410       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1411         break;
1412 
1413       Known.Zero.setAllBits();
1414       Known.One.setAllBits();
1415       for (Value *IncValue : P->incoming_values()) {
1416         // Skip direct self references.
1417         if (IncValue == P) continue;
1418 
1419         Known2 = KnownBits(BitWidth);
1420         // Recurse, but cap the recursion to one level, because we don't
1421         // want to waste time spinning around in loops.
1422         computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1423         Known.Zero &= Known2.Zero;
1424         Known.One &= Known2.One;
1425         // If all bits have been ruled out, there's no need to check
1426         // more operands.
1427         if (!Known.Zero && !Known.One)
1428           break;
1429       }
1430     }
1431     break;
1432   }
1433   case Instruction::Call:
1434   case Instruction::Invoke:
1435     // If range metadata is attached to this call, set known bits from that,
1436     // and then intersect with known bits based on other properties of the
1437     // function.
1438     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1439       computeKnownBitsFromRangeMetadata(*MD, Known);
1440     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1441       computeKnownBits(RV, Known2, Depth + 1, Q);
1442       Known.Zero |= Known2.Zero;
1443       Known.One |= Known2.One;
1444     }
1445     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1446       switch (II->getIntrinsicID()) {
1447       default: break;
1448       case Intrinsic::bitreverse:
1449         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1450         Known.Zero |= Known2.Zero.reverseBits();
1451         Known.One |= Known2.One.reverseBits();
1452         break;
1453       case Intrinsic::bswap:
1454         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1455         Known.Zero |= Known2.Zero.byteSwap();
1456         Known.One |= Known2.One.byteSwap();
1457         break;
1458       case Intrinsic::ctlz: {
1459         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1460         // If we have a known 1, its position is our upper bound.
1461         unsigned PossibleLZ = Known2.One.countLeadingZeros();
1462         // If this call is undefined for 0, the result will be less than 2^n.
1463         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1464           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1465         unsigned LowBits = Log2_32(PossibleLZ)+1;
1466         Known.Zero.setBitsFrom(LowBits);
1467         break;
1468       }
1469       case Intrinsic::cttz: {
1470         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1471         // If we have a known 1, its position is our upper bound.
1472         unsigned PossibleTZ = Known2.One.countTrailingZeros();
1473         // If this call is undefined for 0, the result will be less than 2^n.
1474         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1475           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1476         unsigned LowBits = Log2_32(PossibleTZ)+1;
1477         Known.Zero.setBitsFrom(LowBits);
1478         break;
1479       }
1480       case Intrinsic::ctpop: {
1481         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1482         // We can bound the space the count needs.  Also, bits known to be zero
1483         // can't contribute to the population.
1484         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1485         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1486         Known.Zero.setBitsFrom(LowBits);
1487         // TODO: we could bound KnownOne using the lower bound on the number
1488         // of bits which might be set provided by popcnt KnownOne2.
1489         break;
1490       }
1491       case Intrinsic::x86_sse42_crc32_64_64:
1492         Known.Zero.setBitsFrom(32);
1493         break;
1494       }
1495     }
1496     break;
1497   case Instruction::ExtractElement:
1498     // Look through extract element. At the moment we keep this simple and skip
1499     // tracking the specific element. But at least we might find information
1500     // valid for all elements of the vector (for example if vector is sign
1501     // extended, shifted, etc).
1502     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1503     break;
1504   case Instruction::ExtractValue:
1505     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1506       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1507       if (EVI->getNumIndices() != 1) break;
1508       if (EVI->getIndices()[0] == 0) {
1509         switch (II->getIntrinsicID()) {
1510         default: break;
1511         case Intrinsic::uadd_with_overflow:
1512         case Intrinsic::sadd_with_overflow:
1513           computeKnownBitsAddSub(true, II->getArgOperand(0),
1514                                  II->getArgOperand(1), false, Known, Known2,
1515                                  Depth, Q);
1516           break;
1517         case Intrinsic::usub_with_overflow:
1518         case Intrinsic::ssub_with_overflow:
1519           computeKnownBitsAddSub(false, II->getArgOperand(0),
1520                                  II->getArgOperand(1), false, Known, Known2,
1521                                  Depth, Q);
1522           break;
1523         case Intrinsic::umul_with_overflow:
1524         case Intrinsic::smul_with_overflow:
1525           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1526                               Known, Known2, Depth, Q);
1527           break;
1528         }
1529       }
1530     }
1531   }
1532 }
1533 
1534 /// Determine which bits of V are known to be either zero or one and return
1535 /// them.
1536 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1537   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1538   computeKnownBits(V, Known, Depth, Q);
1539   return Known;
1540 }
1541 
1542 /// Determine which bits of V are known to be either zero or one and return
1543 /// them in the Known bit set.
1544 ///
1545 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1546 /// we cannot optimize based on the assumption that it is zero without changing
1547 /// it to be an explicit zero.  If we don't change it to zero, other code could
1548 /// optimized based on the contradictory assumption that it is non-zero.
1549 /// Because instcombine aggressively folds operations with undef args anyway,
1550 /// this won't lose us code quality.
1551 ///
1552 /// This function is defined on values with integer type, values with pointer
1553 /// type, and vectors of integers.  In the case
1554 /// where V is a vector, known zero, and known one values are the
1555 /// same width as the vector element, and the bit is set only if it is true
1556 /// for all of the elements in the vector.
1557 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1558                       const Query &Q) {
1559   assert(V && "No Value?");
1560   assert(Depth <= MaxDepth && "Limit Search Depth");
1561   unsigned BitWidth = Known.getBitWidth();
1562 
1563   assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1564           V->getType()->isPtrOrPtrVectorTy()) &&
1565          "Not integer or pointer type!");
1566 
1567   Type *ScalarTy = V->getType()->getScalarType();
1568   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1569     Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1570   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1571   (void)BitWidth;
1572   (void)ExpectedWidth;
1573 
1574   const APInt *C;
1575   if (match(V, m_APInt(C))) {
1576     // We know all of the bits for a scalar constant or a splat vector constant!
1577     Known.One = *C;
1578     Known.Zero = ~Known.One;
1579     return;
1580   }
1581   // Null and aggregate-zero are all-zeros.
1582   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1583     Known.setAllZero();
1584     return;
1585   }
1586   // Handle a constant vector by taking the intersection of the known bits of
1587   // each element.
1588   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1589     // We know that CDS must be a vector of integers. Take the intersection of
1590     // each element.
1591     Known.Zero.setAllBits(); Known.One.setAllBits();
1592     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1593       APInt Elt = CDS->getElementAsAPInt(i);
1594       Known.Zero &= ~Elt;
1595       Known.One &= Elt;
1596     }
1597     return;
1598   }
1599 
1600   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1601     // We know that CV must be a vector of integers. Take the intersection of
1602     // each element.
1603     Known.Zero.setAllBits(); Known.One.setAllBits();
1604     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1605       Constant *Element = CV->getAggregateElement(i);
1606       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1607       if (!ElementCI) {
1608         Known.resetAll();
1609         return;
1610       }
1611       const APInt &Elt = ElementCI->getValue();
1612       Known.Zero &= ~Elt;
1613       Known.One &= Elt;
1614     }
1615     return;
1616   }
1617 
1618   // Start out not knowing anything.
1619   Known.resetAll();
1620 
1621   // We can't imply anything about undefs.
1622   if (isa<UndefValue>(V))
1623     return;
1624 
1625   // There's no point in looking through other users of ConstantData for
1626   // assumptions.  Confirm that we've handled them all.
1627   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1628 
1629   // Limit search depth.
1630   // All recursive calls that increase depth must come after this.
1631   if (Depth == MaxDepth)
1632     return;
1633 
1634   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1635   // the bits of its aliasee.
1636   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1637     if (!GA->isInterposable())
1638       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1639     return;
1640   }
1641 
1642   if (const Operator *I = dyn_cast<Operator>(V))
1643     computeKnownBitsFromOperator(I, Known, Depth, Q);
1644 
1645   // Aligned pointers have trailing zeros - refine Known.Zero set
1646   if (V->getType()->isPointerTy()) {
1647     unsigned Align = V->getPointerAlignment(Q.DL);
1648     if (Align)
1649       Known.Zero.setLowBits(countTrailingZeros(Align));
1650   }
1651 
1652   // computeKnownBitsFromAssume strictly refines Known.
1653   // Therefore, we run them after computeKnownBitsFromOperator.
1654 
1655   // Check whether a nearby assume intrinsic can determine some known bits.
1656   computeKnownBitsFromAssume(V, Known, Depth, Q);
1657 
1658   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1659 }
1660 
1661 /// Return true if the given value is known to have exactly one
1662 /// bit set when defined. For vectors return true if every element is known to
1663 /// be a power of two when defined. Supports values with integer or pointer
1664 /// types and vectors of integers.
1665 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1666                             const Query &Q) {
1667   assert(Depth <= MaxDepth && "Limit Search Depth");
1668 
1669   // Attempt to match against constants.
1670   if (OrZero && match(V, m_Power2OrZero()))
1671       return true;
1672   if (match(V, m_Power2()))
1673       return true;
1674 
1675   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1676   // it is shifted off the end then the result is undefined.
1677   if (match(V, m_Shl(m_One(), m_Value())))
1678     return true;
1679 
1680   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1681   // the bottom.  If it is shifted off the bottom then the result is undefined.
1682   if (match(V, m_LShr(m_SignMask(), m_Value())))
1683     return true;
1684 
1685   // The remaining tests are all recursive, so bail out if we hit the limit.
1686   if (Depth++ == MaxDepth)
1687     return false;
1688 
1689   Value *X = nullptr, *Y = nullptr;
1690   // A shift left or a logical shift right of a power of two is a power of two
1691   // or zero.
1692   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1693                  match(V, m_LShr(m_Value(X), m_Value()))))
1694     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1695 
1696   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1697     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1698 
1699   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1700     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1701            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1702 
1703   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1704     // A power of two and'd with anything is a power of two or zero.
1705     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1706         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1707       return true;
1708     // X & (-X) is always a power of two or zero.
1709     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1710       return true;
1711     return false;
1712   }
1713 
1714   // Adding a power-of-two or zero to the same power-of-two or zero yields
1715   // either the original power-of-two, a larger power-of-two or zero.
1716   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1717     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1718     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1719       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1720           match(X, m_And(m_Value(), m_Specific(Y))))
1721         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1722           return true;
1723       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1724           match(Y, m_And(m_Value(), m_Specific(X))))
1725         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1726           return true;
1727 
1728       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1729       KnownBits LHSBits(BitWidth);
1730       computeKnownBits(X, LHSBits, Depth, Q);
1731 
1732       KnownBits RHSBits(BitWidth);
1733       computeKnownBits(Y, RHSBits, Depth, Q);
1734       // If i8 V is a power of two or zero:
1735       //  ZeroBits: 1 1 1 0 1 1 1 1
1736       // ~ZeroBits: 0 0 0 1 0 0 0 0
1737       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1738         // If OrZero isn't set, we cannot give back a zero result.
1739         // Make sure either the LHS or RHS has a bit set.
1740         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1741           return true;
1742     }
1743   }
1744 
1745   // An exact divide or right shift can only shift off zero bits, so the result
1746   // is a power of two only if the first operand is a power of two and not
1747   // copying a sign bit (sdiv int_min, 2).
1748   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1749       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1750     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1751                                   Depth, Q);
1752   }
1753 
1754   return false;
1755 }
1756 
1757 /// \brief Test whether a GEP's result is known to be non-null.
1758 ///
1759 /// Uses properties inherent in a GEP to try to determine whether it is known
1760 /// to be non-null.
1761 ///
1762 /// Currently this routine does not support vector GEPs.
1763 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1764                               const Query &Q) {
1765   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1766     return false;
1767 
1768   // FIXME: Support vector-GEPs.
1769   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1770 
1771   // If the base pointer is non-null, we cannot walk to a null address with an
1772   // inbounds GEP in address space zero.
1773   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1774     return true;
1775 
1776   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1777   // If so, then the GEP cannot produce a null pointer, as doing so would
1778   // inherently violate the inbounds contract within address space zero.
1779   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1780        GTI != GTE; ++GTI) {
1781     // Struct types are easy -- they must always be indexed by a constant.
1782     if (StructType *STy = GTI.getStructTypeOrNull()) {
1783       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1784       unsigned ElementIdx = OpC->getZExtValue();
1785       const StructLayout *SL = Q.DL.getStructLayout(STy);
1786       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1787       if (ElementOffset > 0)
1788         return true;
1789       continue;
1790     }
1791 
1792     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1793     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1794       continue;
1795 
1796     // Fast path the constant operand case both for efficiency and so we don't
1797     // increment Depth when just zipping down an all-constant GEP.
1798     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1799       if (!OpC->isZero())
1800         return true;
1801       continue;
1802     }
1803 
1804     // We post-increment Depth here because while isKnownNonZero increments it
1805     // as well, when we pop back up that increment won't persist. We don't want
1806     // to recurse 10k times just because we have 10k GEP operands. We don't
1807     // bail completely out because we want to handle constant GEPs regardless
1808     // of depth.
1809     if (Depth++ >= MaxDepth)
1810       continue;
1811 
1812     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1813       return true;
1814   }
1815 
1816   return false;
1817 }
1818 
1819 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1820                                                   const Instruction *CtxI,
1821                                                   const DominatorTree *DT) {
1822   assert(V->getType()->isPointerTy() && "V must be pointer type");
1823   assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1824 
1825   if (!CtxI || !DT)
1826     return false;
1827 
1828   unsigned NumUsesExplored = 0;
1829   for (auto *U : V->users()) {
1830     // Avoid massive lists
1831     if (NumUsesExplored >= DomConditionsMaxUses)
1832       break;
1833     NumUsesExplored++;
1834 
1835     // If the value is used as an argument to a call or invoke, then argument
1836     // attributes may provide an answer about null-ness.
1837     if (auto CS = ImmutableCallSite(U))
1838       if (auto *CalledFunc = CS.getCalledFunction())
1839         for (const Argument &Arg : CalledFunc->args())
1840           if (CS.getArgOperand(Arg.getArgNo()) == V &&
1841               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1842             return true;
1843 
1844     // Consider only compare instructions uniquely controlling a branch
1845     CmpInst::Predicate Pred;
1846     if (!match(const_cast<User *>(U),
1847                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1848         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1849       continue;
1850 
1851     for (auto *CmpU : U->users()) {
1852       if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
1853         assert(BI->isConditional() && "uses a comparison!");
1854 
1855         BasicBlock *NonNullSuccessor =
1856             BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1857         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1858         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1859           return true;
1860       } else if (Pred == ICmpInst::ICMP_NE &&
1861                  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1862                  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1863         return true;
1864       }
1865     }
1866   }
1867 
1868   return false;
1869 }
1870 
1871 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1872 /// ensure that the value it's attached to is never Value?  'RangeType' is
1873 /// is the type of the value described by the range.
1874 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1875   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1876   assert(NumRanges >= 1);
1877   for (unsigned i = 0; i < NumRanges; ++i) {
1878     ConstantInt *Lower =
1879         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1880     ConstantInt *Upper =
1881         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1882     ConstantRange Range(Lower->getValue(), Upper->getValue());
1883     if (Range.contains(Value))
1884       return false;
1885   }
1886   return true;
1887 }
1888 
1889 /// Return true if the given value is known to be non-zero when defined. For
1890 /// vectors, return true if every element is known to be non-zero when
1891 /// defined. For pointers, if the context instruction and dominator tree are
1892 /// specified, perform context-sensitive analysis and return true if the
1893 /// pointer couldn't possibly be null at the specified instruction.
1894 /// Supports values with integer or pointer type and vectors of integers.
1895 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1896   if (auto *C = dyn_cast<Constant>(V)) {
1897     if (C->isNullValue())
1898       return false;
1899     if (isa<ConstantInt>(C))
1900       // Must be non-zero due to null test above.
1901       return true;
1902 
1903     // For constant vectors, check that all elements are undefined or known
1904     // non-zero to determine that the whole vector is known non-zero.
1905     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1906       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1907         Constant *Elt = C->getAggregateElement(i);
1908         if (!Elt || Elt->isNullValue())
1909           return false;
1910         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1911           return false;
1912       }
1913       return true;
1914     }
1915 
1916     // A global variable in address space 0 is non null unless extern weak
1917     // or an absolute symbol reference. Other address spaces may have null as a
1918     // valid address for a global, so we can't assume anything.
1919     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1920       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1921           GV->getType()->getAddressSpace() == 0)
1922         return true;
1923     } else
1924       return false;
1925   }
1926 
1927   if (auto *I = dyn_cast<Instruction>(V)) {
1928     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1929       // If the possible ranges don't contain zero, then the value is
1930       // definitely non-zero.
1931       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1932         const APInt ZeroValue(Ty->getBitWidth(), 0);
1933         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1934           return true;
1935       }
1936     }
1937   }
1938 
1939   // Check for pointer simplifications.
1940   if (V->getType()->isPointerTy()) {
1941     // Alloca never returns null, malloc might.
1942     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
1943       return true;
1944 
1945     // A byval, inalloca, or nonnull argument is never null.
1946     if (const Argument *A = dyn_cast<Argument>(V))
1947       if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
1948         return true;
1949 
1950     // A Load tagged with nonnull metadata is never null.
1951     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1952       if (LI->getMetadata(LLVMContext::MD_nonnull))
1953         return true;
1954 
1955     if (auto CS = ImmutableCallSite(V))
1956       if (CS.isReturnNonNull())
1957         return true;
1958   }
1959 
1960   // The remaining tests are all recursive, so bail out if we hit the limit.
1961   if (Depth++ >= MaxDepth)
1962     return false;
1963 
1964   // Check for recursive pointer simplifications.
1965   if (V->getType()->isPointerTy()) {
1966     if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
1967       return true;
1968 
1969     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1970       if (isGEPKnownNonNull(GEP, Depth, Q))
1971         return true;
1972   }
1973 
1974   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1975 
1976   // X | Y != 0 if X != 0 or Y != 0.
1977   Value *X = nullptr, *Y = nullptr;
1978   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1979     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1980 
1981   // ext X != 0 if X != 0.
1982   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1983     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1984 
1985   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1986   // if the lowest bit is shifted off the end.
1987   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1988     // shl nuw can't remove any non-zero bits.
1989     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1990     if (BO->hasNoUnsignedWrap())
1991       return isKnownNonZero(X, Depth, Q);
1992 
1993     KnownBits Known(BitWidth);
1994     computeKnownBits(X, Known, Depth, Q);
1995     if (Known.One[0])
1996       return true;
1997   }
1998   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1999   // defined if the sign bit is shifted off the end.
2000   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2001     // shr exact can only shift out zero bits.
2002     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2003     if (BO->isExact())
2004       return isKnownNonZero(X, Depth, Q);
2005 
2006     KnownBits Known = computeKnownBits(X, Depth, Q);
2007     if (Known.isNegative())
2008       return true;
2009 
2010     // If the shifter operand is a constant, and all of the bits shifted
2011     // out are known to be zero, and X is known non-zero then at least one
2012     // non-zero bit must remain.
2013     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2014       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2015       // Is there a known one in the portion not shifted out?
2016       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2017         return true;
2018       // Are all the bits to be shifted out known zero?
2019       if (Known.countMinTrailingZeros() >= ShiftVal)
2020         return isKnownNonZero(X, Depth, Q);
2021     }
2022   }
2023   // div exact can only produce a zero if the dividend is zero.
2024   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2025     return isKnownNonZero(X, Depth, Q);
2026   }
2027   // X + Y.
2028   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2029     KnownBits XKnown = computeKnownBits(X, Depth, Q);
2030     KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2031 
2032     // If X and Y are both non-negative (as signed values) then their sum is not
2033     // zero unless both X and Y are zero.
2034     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2035       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2036         return true;
2037 
2038     // If X and Y are both negative (as signed values) then their sum is not
2039     // zero unless both X and Y equal INT_MIN.
2040     if (XKnown.isNegative() && YKnown.isNegative()) {
2041       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2042       // The sign bit of X is set.  If some other bit is set then X is not equal
2043       // to INT_MIN.
2044       if (XKnown.One.intersects(Mask))
2045         return true;
2046       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2047       // to INT_MIN.
2048       if (YKnown.One.intersects(Mask))
2049         return true;
2050     }
2051 
2052     // The sum of a non-negative number and a power of two is not zero.
2053     if (XKnown.isNonNegative() &&
2054         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2055       return true;
2056     if (YKnown.isNonNegative() &&
2057         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2058       return true;
2059   }
2060   // X * Y.
2061   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2062     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2063     // If X and Y are non-zero then so is X * Y as long as the multiplication
2064     // does not overflow.
2065     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
2066         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2067       return true;
2068   }
2069   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2070   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2071     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2072         isKnownNonZero(SI->getFalseValue(), Depth, Q))
2073       return true;
2074   }
2075   // PHI
2076   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2077     // Try and detect a recurrence that monotonically increases from a
2078     // starting value, as these are common as induction variables.
2079     if (PN->getNumIncomingValues() == 2) {
2080       Value *Start = PN->getIncomingValue(0);
2081       Value *Induction = PN->getIncomingValue(1);
2082       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2083         std::swap(Start, Induction);
2084       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2085         if (!C->isZero() && !C->isNegative()) {
2086           ConstantInt *X;
2087           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2088                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2089               !X->isNegative())
2090             return true;
2091         }
2092       }
2093     }
2094     // Check if all incoming values are non-zero constant.
2095     bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2096       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2097     });
2098     if (AllNonZeroConstants)
2099       return true;
2100   }
2101 
2102   KnownBits Known(BitWidth);
2103   computeKnownBits(V, Known, Depth, Q);
2104   return Known.One != 0;
2105 }
2106 
2107 /// Return true if V2 == V1 + X, where X is known non-zero.
2108 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2109   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2110   if (!BO || BO->getOpcode() != Instruction::Add)
2111     return false;
2112   Value *Op = nullptr;
2113   if (V2 == BO->getOperand(0))
2114     Op = BO->getOperand(1);
2115   else if (V2 == BO->getOperand(1))
2116     Op = BO->getOperand(0);
2117   else
2118     return false;
2119   return isKnownNonZero(Op, 0, Q);
2120 }
2121 
2122 /// Return true if it is known that V1 != V2.
2123 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2124   if (V1 == V2)
2125     return false;
2126   if (V1->getType() != V2->getType())
2127     // We can't look through casts yet.
2128     return false;
2129   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2130     return true;
2131 
2132   if (V1->getType()->isIntOrIntVectorTy()) {
2133     // Are any known bits in V1 contradictory to known bits in V2? If V1
2134     // has a known zero where V2 has a known one, they must not be equal.
2135     KnownBits Known1 = computeKnownBits(V1, 0, Q);
2136     KnownBits Known2 = computeKnownBits(V2, 0, Q);
2137 
2138     if (Known1.Zero.intersects(Known2.One) ||
2139         Known2.Zero.intersects(Known1.One))
2140       return true;
2141   }
2142   return false;
2143 }
2144 
2145 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2146 /// simplify operations downstream. Mask is known to be zero for bits that V
2147 /// cannot have.
2148 ///
2149 /// This function is defined on values with integer type, values with pointer
2150 /// type, and vectors of integers.  In the case
2151 /// where V is a vector, the mask, known zero, and known one values are the
2152 /// same width as the vector element, and the bit is set only if it is true
2153 /// for all of the elements in the vector.
2154 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2155                        const Query &Q) {
2156   KnownBits Known(Mask.getBitWidth());
2157   computeKnownBits(V, Known, Depth, Q);
2158   return Mask.isSubsetOf(Known.Zero);
2159 }
2160 
2161 /// For vector constants, loop over the elements and find the constant with the
2162 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2163 /// or if any element was not analyzed; otherwise, return the count for the
2164 /// element with the minimum number of sign bits.
2165 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2166                                                  unsigned TyBits) {
2167   const auto *CV = dyn_cast<Constant>(V);
2168   if (!CV || !CV->getType()->isVectorTy())
2169     return 0;
2170 
2171   unsigned MinSignBits = TyBits;
2172   unsigned NumElts = CV->getType()->getVectorNumElements();
2173   for (unsigned i = 0; i != NumElts; ++i) {
2174     // If we find a non-ConstantInt, bail out.
2175     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2176     if (!Elt)
2177       return 0;
2178 
2179     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2180   }
2181 
2182   return MinSignBits;
2183 }
2184 
2185 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2186                                        const Query &Q);
2187 
2188 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2189                                    const Query &Q) {
2190   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2191   assert(Result > 0 && "At least one sign bit needs to be present!");
2192   return Result;
2193 }
2194 
2195 /// Return the number of times the sign bit of the register is replicated into
2196 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2197 /// (itself), but other cases can give us information. For example, immediately
2198 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2199 /// other, so we return 3. For vectors, return the number of sign bits for the
2200 /// vector element with the minimum number of known sign bits.
2201 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2202                                        const Query &Q) {
2203   assert(Depth <= MaxDepth && "Limit Search Depth");
2204 
2205   // We return the minimum number of sign bits that are guaranteed to be present
2206   // in V, so for undef we have to conservatively return 1.  We don't have the
2207   // same behavior for poison though -- that's a FIXME today.
2208 
2209   Type *ScalarTy = V->getType()->getScalarType();
2210   unsigned TyBits = ScalarTy->isPointerTy() ?
2211     Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2212     Q.DL.getTypeSizeInBits(ScalarTy);
2213 
2214   unsigned Tmp, Tmp2;
2215   unsigned FirstAnswer = 1;
2216 
2217   // Note that ConstantInt is handled by the general computeKnownBits case
2218   // below.
2219 
2220   if (Depth == MaxDepth)
2221     return 1;  // Limit search depth.
2222 
2223   const Operator *U = dyn_cast<Operator>(V);
2224   switch (Operator::getOpcode(V)) {
2225   default: break;
2226   case Instruction::SExt:
2227     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2228     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2229 
2230   case Instruction::SDiv: {
2231     const APInt *Denominator;
2232     // sdiv X, C -> adds log(C) sign bits.
2233     if (match(U->getOperand(1), m_APInt(Denominator))) {
2234 
2235       // Ignore non-positive denominator.
2236       if (!Denominator->isStrictlyPositive())
2237         break;
2238 
2239       // Calculate the incoming numerator bits.
2240       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2241 
2242       // Add floor(log(C)) bits to the numerator bits.
2243       return std::min(TyBits, NumBits + Denominator->logBase2());
2244     }
2245     break;
2246   }
2247 
2248   case Instruction::SRem: {
2249     const APInt *Denominator;
2250     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2251     // positive constant.  This let us put a lower bound on the number of sign
2252     // bits.
2253     if (match(U->getOperand(1), m_APInt(Denominator))) {
2254 
2255       // Ignore non-positive denominator.
2256       if (!Denominator->isStrictlyPositive())
2257         break;
2258 
2259       // Calculate the incoming numerator bits. SRem by a positive constant
2260       // can't lower the number of sign bits.
2261       unsigned NumrBits =
2262           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2263 
2264       // Calculate the leading sign bit constraints by examining the
2265       // denominator.  Given that the denominator is positive, there are two
2266       // cases:
2267       //
2268       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2269       //     (1 << ceilLogBase2(C)).
2270       //
2271       //  2. the numerator is negative.  Then the result range is (-C,0] and
2272       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2273       //
2274       // Thus a lower bound on the number of sign bits is `TyBits -
2275       // ceilLogBase2(C)`.
2276 
2277       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2278       return std::max(NumrBits, ResBits);
2279     }
2280     break;
2281   }
2282 
2283   case Instruction::AShr: {
2284     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2285     // ashr X, C   -> adds C sign bits.  Vectors too.
2286     const APInt *ShAmt;
2287     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2288       if (ShAmt->uge(TyBits))
2289         break;  // Bad shift.
2290       unsigned ShAmtLimited = ShAmt->getZExtValue();
2291       Tmp += ShAmtLimited;
2292       if (Tmp > TyBits) Tmp = TyBits;
2293     }
2294     return Tmp;
2295   }
2296   case Instruction::Shl: {
2297     const APInt *ShAmt;
2298     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2299       // shl destroys sign bits.
2300       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2301       if (ShAmt->uge(TyBits) ||      // Bad shift.
2302           ShAmt->uge(Tmp)) break;    // Shifted all sign bits out.
2303       Tmp2 = ShAmt->getZExtValue();
2304       return Tmp - Tmp2;
2305     }
2306     break;
2307   }
2308   case Instruction::And:
2309   case Instruction::Or:
2310   case Instruction::Xor:    // NOT is handled here.
2311     // Logical binary ops preserve the number of sign bits at the worst.
2312     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2313     if (Tmp != 1) {
2314       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2315       FirstAnswer = std::min(Tmp, Tmp2);
2316       // We computed what we know about the sign bits as our first
2317       // answer. Now proceed to the generic code that uses
2318       // computeKnownBits, and pick whichever answer is better.
2319     }
2320     break;
2321 
2322   case Instruction::Select:
2323     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2324     if (Tmp == 1) return 1;  // Early out.
2325     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2326     return std::min(Tmp, Tmp2);
2327 
2328   case Instruction::Add:
2329     // Add can have at most one carry bit.  Thus we know that the output
2330     // is, at worst, one more bit than the inputs.
2331     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2332     if (Tmp == 1) return 1;  // Early out.
2333 
2334     // Special case decrementing a value (ADD X, -1):
2335     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2336       if (CRHS->isAllOnesValue()) {
2337         KnownBits Known(TyBits);
2338         computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2339 
2340         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2341         // sign bits set.
2342         if ((Known.Zero | 1).isAllOnesValue())
2343           return TyBits;
2344 
2345         // If we are subtracting one from a positive number, there is no carry
2346         // out of the result.
2347         if (Known.isNonNegative())
2348           return Tmp;
2349       }
2350 
2351     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2352     if (Tmp2 == 1) return 1;
2353     return std::min(Tmp, Tmp2)-1;
2354 
2355   case Instruction::Sub:
2356     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2357     if (Tmp2 == 1) return 1;
2358 
2359     // Handle NEG.
2360     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2361       if (CLHS->isNullValue()) {
2362         KnownBits Known(TyBits);
2363         computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2364         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2365         // sign bits set.
2366         if ((Known.Zero | 1).isAllOnesValue())
2367           return TyBits;
2368 
2369         // If the input is known to be positive (the sign bit is known clear),
2370         // the output of the NEG has the same number of sign bits as the input.
2371         if (Known.isNonNegative())
2372           return Tmp2;
2373 
2374         // Otherwise, we treat this like a SUB.
2375       }
2376 
2377     // Sub can have at most one carry bit.  Thus we know that the output
2378     // is, at worst, one more bit than the inputs.
2379     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2380     if (Tmp == 1) return 1;  // Early out.
2381     return std::min(Tmp, Tmp2)-1;
2382 
2383   case Instruction::Mul: {
2384     // The output of the Mul can be at most twice the valid bits in the inputs.
2385     unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2386     if (SignBitsOp0 == 1) return 1;  // Early out.
2387     unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2388     if (SignBitsOp1 == 1) return 1;
2389     unsigned OutValidBits =
2390         (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2391     return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2392   }
2393 
2394   case Instruction::PHI: {
2395     const PHINode *PN = cast<PHINode>(U);
2396     unsigned NumIncomingValues = PN->getNumIncomingValues();
2397     // Don't analyze large in-degree PHIs.
2398     if (NumIncomingValues > 4) break;
2399     // Unreachable blocks may have zero-operand PHI nodes.
2400     if (NumIncomingValues == 0) break;
2401 
2402     // Take the minimum of all incoming values.  This can't infinitely loop
2403     // because of our depth threshold.
2404     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2405     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2406       if (Tmp == 1) return Tmp;
2407       Tmp = std::min(
2408           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2409     }
2410     return Tmp;
2411   }
2412 
2413   case Instruction::Trunc:
2414     // FIXME: it's tricky to do anything useful for this, but it is an important
2415     // case for targets like X86.
2416     break;
2417 
2418   case Instruction::ExtractElement:
2419     // Look through extract element. At the moment we keep this simple and skip
2420     // tracking the specific element. But at least we might find information
2421     // valid for all elements of the vector (for example if vector is sign
2422     // extended, shifted, etc).
2423     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2424   }
2425 
2426   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2427   // use this information.
2428 
2429   // If we can examine all elements of a vector constant successfully, we're
2430   // done (we can't do any better than that). If not, keep trying.
2431   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2432     return VecSignBits;
2433 
2434   KnownBits Known(TyBits);
2435   computeKnownBits(V, Known, Depth, Q);
2436 
2437   // If we know that the sign bit is either zero or one, determine the number of
2438   // identical bits in the top of the input value.
2439   return std::max(FirstAnswer, Known.countMinSignBits());
2440 }
2441 
2442 /// This function computes the integer multiple of Base that equals V.
2443 /// If successful, it returns true and returns the multiple in
2444 /// Multiple. If unsuccessful, it returns false. It looks
2445 /// through SExt instructions only if LookThroughSExt is true.
2446 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2447                            bool LookThroughSExt, unsigned Depth) {
2448   const unsigned MaxDepth = 6;
2449 
2450   assert(V && "No Value?");
2451   assert(Depth <= MaxDepth && "Limit Search Depth");
2452   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2453 
2454   Type *T = V->getType();
2455 
2456   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2457 
2458   if (Base == 0)
2459     return false;
2460 
2461   if (Base == 1) {
2462     Multiple = V;
2463     return true;
2464   }
2465 
2466   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2467   Constant *BaseVal = ConstantInt::get(T, Base);
2468   if (CO && CO == BaseVal) {
2469     // Multiple is 1.
2470     Multiple = ConstantInt::get(T, 1);
2471     return true;
2472   }
2473 
2474   if (CI && CI->getZExtValue() % Base == 0) {
2475     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2476     return true;
2477   }
2478 
2479   if (Depth == MaxDepth) return false;  // Limit search depth.
2480 
2481   Operator *I = dyn_cast<Operator>(V);
2482   if (!I) return false;
2483 
2484   switch (I->getOpcode()) {
2485   default: break;
2486   case Instruction::SExt:
2487     if (!LookThroughSExt) return false;
2488     // otherwise fall through to ZExt
2489     LLVM_FALLTHROUGH;
2490   case Instruction::ZExt:
2491     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2492                            LookThroughSExt, Depth+1);
2493   case Instruction::Shl:
2494   case Instruction::Mul: {
2495     Value *Op0 = I->getOperand(0);
2496     Value *Op1 = I->getOperand(1);
2497 
2498     if (I->getOpcode() == Instruction::Shl) {
2499       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2500       if (!Op1CI) return false;
2501       // Turn Op0 << Op1 into Op0 * 2^Op1
2502       APInt Op1Int = Op1CI->getValue();
2503       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2504       APInt API(Op1Int.getBitWidth(), 0);
2505       API.setBit(BitToSet);
2506       Op1 = ConstantInt::get(V->getContext(), API);
2507     }
2508 
2509     Value *Mul0 = nullptr;
2510     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2511       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2512         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2513           if (Op1C->getType()->getPrimitiveSizeInBits() <
2514               MulC->getType()->getPrimitiveSizeInBits())
2515             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2516           if (Op1C->getType()->getPrimitiveSizeInBits() >
2517               MulC->getType()->getPrimitiveSizeInBits())
2518             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2519 
2520           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2521           Multiple = ConstantExpr::getMul(MulC, Op1C);
2522           return true;
2523         }
2524 
2525       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2526         if (Mul0CI->getValue() == 1) {
2527           // V == Base * Op1, so return Op1
2528           Multiple = Op1;
2529           return true;
2530         }
2531     }
2532 
2533     Value *Mul1 = nullptr;
2534     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2535       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2536         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2537           if (Op0C->getType()->getPrimitiveSizeInBits() <
2538               MulC->getType()->getPrimitiveSizeInBits())
2539             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2540           if (Op0C->getType()->getPrimitiveSizeInBits() >
2541               MulC->getType()->getPrimitiveSizeInBits())
2542             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2543 
2544           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2545           Multiple = ConstantExpr::getMul(MulC, Op0C);
2546           return true;
2547         }
2548 
2549       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2550         if (Mul1CI->getValue() == 1) {
2551           // V == Base * Op0, so return Op0
2552           Multiple = Op0;
2553           return true;
2554         }
2555     }
2556   }
2557   }
2558 
2559   // We could not determine if V is a multiple of Base.
2560   return false;
2561 }
2562 
2563 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2564                                             const TargetLibraryInfo *TLI) {
2565   const Function *F = ICS.getCalledFunction();
2566   if (!F)
2567     return Intrinsic::not_intrinsic;
2568 
2569   if (F->isIntrinsic())
2570     return F->getIntrinsicID();
2571 
2572   if (!TLI)
2573     return Intrinsic::not_intrinsic;
2574 
2575   LibFunc Func;
2576   // We're going to make assumptions on the semantics of the functions, check
2577   // that the target knows that it's available in this environment and it does
2578   // not have local linkage.
2579   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2580     return Intrinsic::not_intrinsic;
2581 
2582   if (!ICS.onlyReadsMemory())
2583     return Intrinsic::not_intrinsic;
2584 
2585   // Otherwise check if we have a call to a function that can be turned into a
2586   // vector intrinsic.
2587   switch (Func) {
2588   default:
2589     break;
2590   case LibFunc_sin:
2591   case LibFunc_sinf:
2592   case LibFunc_sinl:
2593     return Intrinsic::sin;
2594   case LibFunc_cos:
2595   case LibFunc_cosf:
2596   case LibFunc_cosl:
2597     return Intrinsic::cos;
2598   case LibFunc_exp:
2599   case LibFunc_expf:
2600   case LibFunc_expl:
2601     return Intrinsic::exp;
2602   case LibFunc_exp2:
2603   case LibFunc_exp2f:
2604   case LibFunc_exp2l:
2605     return Intrinsic::exp2;
2606   case LibFunc_log:
2607   case LibFunc_logf:
2608   case LibFunc_logl:
2609     return Intrinsic::log;
2610   case LibFunc_log10:
2611   case LibFunc_log10f:
2612   case LibFunc_log10l:
2613     return Intrinsic::log10;
2614   case LibFunc_log2:
2615   case LibFunc_log2f:
2616   case LibFunc_log2l:
2617     return Intrinsic::log2;
2618   case LibFunc_fabs:
2619   case LibFunc_fabsf:
2620   case LibFunc_fabsl:
2621     return Intrinsic::fabs;
2622   case LibFunc_fmin:
2623   case LibFunc_fminf:
2624   case LibFunc_fminl:
2625     return Intrinsic::minnum;
2626   case LibFunc_fmax:
2627   case LibFunc_fmaxf:
2628   case LibFunc_fmaxl:
2629     return Intrinsic::maxnum;
2630   case LibFunc_copysign:
2631   case LibFunc_copysignf:
2632   case LibFunc_copysignl:
2633     return Intrinsic::copysign;
2634   case LibFunc_floor:
2635   case LibFunc_floorf:
2636   case LibFunc_floorl:
2637     return Intrinsic::floor;
2638   case LibFunc_ceil:
2639   case LibFunc_ceilf:
2640   case LibFunc_ceill:
2641     return Intrinsic::ceil;
2642   case LibFunc_trunc:
2643   case LibFunc_truncf:
2644   case LibFunc_truncl:
2645     return Intrinsic::trunc;
2646   case LibFunc_rint:
2647   case LibFunc_rintf:
2648   case LibFunc_rintl:
2649     return Intrinsic::rint;
2650   case LibFunc_nearbyint:
2651   case LibFunc_nearbyintf:
2652   case LibFunc_nearbyintl:
2653     return Intrinsic::nearbyint;
2654   case LibFunc_round:
2655   case LibFunc_roundf:
2656   case LibFunc_roundl:
2657     return Intrinsic::round;
2658   case LibFunc_pow:
2659   case LibFunc_powf:
2660   case LibFunc_powl:
2661     return Intrinsic::pow;
2662   case LibFunc_sqrt:
2663   case LibFunc_sqrtf:
2664   case LibFunc_sqrtl:
2665     return Intrinsic::sqrt;
2666   }
2667 
2668   return Intrinsic::not_intrinsic;
2669 }
2670 
2671 /// Return true if we can prove that the specified FP value is never equal to
2672 /// -0.0.
2673 ///
2674 /// NOTE: this function will need to be revisited when we support non-default
2675 /// rounding modes!
2676 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2677                                 unsigned Depth) {
2678   if (auto *CFP = dyn_cast<ConstantFP>(V))
2679     return !CFP->getValueAPF().isNegZero();
2680 
2681   // Limit search depth.
2682   if (Depth == MaxDepth)
2683     return false;
2684 
2685   auto *Op = dyn_cast<Operator>(V);
2686   if (!Op)
2687     return false;
2688 
2689   // Check if the nsz fast-math flag is set.
2690   if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2691     if (FPO->hasNoSignedZeros())
2692       return true;
2693 
2694   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2695   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2696     return true;
2697 
2698   // sitofp and uitofp turn into +0.0 for zero.
2699   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2700     return true;
2701 
2702   if (auto *Call = dyn_cast<CallInst>(Op)) {
2703     Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2704     switch (IID) {
2705     default:
2706       break;
2707     // sqrt(-0.0) = -0.0, no other negative results are possible.
2708     case Intrinsic::sqrt:
2709       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2710     // fabs(x) != -0.0
2711     case Intrinsic::fabs:
2712       return true;
2713     }
2714   }
2715 
2716   return false;
2717 }
2718 
2719 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2720 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2721 /// bit despite comparing equal.
2722 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2723                                             const TargetLibraryInfo *TLI,
2724                                             bool SignBitOnly,
2725                                             unsigned Depth) {
2726   // TODO: This function does not do the right thing when SignBitOnly is true
2727   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2728   // which flips the sign bits of NaNs.  See
2729   // https://llvm.org/bugs/show_bug.cgi?id=31702.
2730 
2731   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2732     return !CFP->getValueAPF().isNegative() ||
2733            (!SignBitOnly && CFP->getValueAPF().isZero());
2734   }
2735 
2736   // Handle vector of constants.
2737   if (auto *CV = dyn_cast<Constant>(V)) {
2738     if (CV->getType()->isVectorTy()) {
2739       unsigned NumElts = CV->getType()->getVectorNumElements();
2740       for (unsigned i = 0; i != NumElts; ++i) {
2741         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2742         if (!CFP)
2743           return false;
2744         if (CFP->getValueAPF().isNegative() &&
2745             (SignBitOnly || !CFP->getValueAPF().isZero()))
2746           return false;
2747       }
2748 
2749       // All non-negative ConstantFPs.
2750       return true;
2751     }
2752   }
2753 
2754   if (Depth == MaxDepth)
2755     return false; // Limit search depth.
2756 
2757   const Operator *I = dyn_cast<Operator>(V);
2758   if (!I)
2759     return false;
2760 
2761   switch (I->getOpcode()) {
2762   default:
2763     break;
2764   // Unsigned integers are always nonnegative.
2765   case Instruction::UIToFP:
2766     return true;
2767   case Instruction::FMul:
2768     // x*x is always non-negative or a NaN.
2769     if (I->getOperand(0) == I->getOperand(1) &&
2770         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2771       return true;
2772 
2773     LLVM_FALLTHROUGH;
2774   case Instruction::FAdd:
2775   case Instruction::FDiv:
2776   case Instruction::FRem:
2777     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2778                                            Depth + 1) &&
2779            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2780                                            Depth + 1);
2781   case Instruction::Select:
2782     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2783                                            Depth + 1) &&
2784            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2785                                            Depth + 1);
2786   case Instruction::FPExt:
2787   case Instruction::FPTrunc:
2788     // Widening/narrowing never change sign.
2789     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2790                                            Depth + 1);
2791   case Instruction::ExtractElement:
2792     // Look through extract element. At the moment we keep this simple and skip
2793     // tracking the specific element. But at least we might find information
2794     // valid for all elements of the vector.
2795     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2796                                            Depth + 1);
2797   case Instruction::Call:
2798     const auto *CI = cast<CallInst>(I);
2799     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2800     switch (IID) {
2801     default:
2802       break;
2803     case Intrinsic::maxnum:
2804       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2805                                              Depth + 1) ||
2806              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2807                                              Depth + 1);
2808     case Intrinsic::minnum:
2809       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2810                                              Depth + 1) &&
2811              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2812                                              Depth + 1);
2813     case Intrinsic::exp:
2814     case Intrinsic::exp2:
2815     case Intrinsic::fabs:
2816       return true;
2817 
2818     case Intrinsic::sqrt:
2819       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
2820       if (!SignBitOnly)
2821         return true;
2822       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2823                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
2824 
2825     case Intrinsic::powi:
2826       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2827         // powi(x,n) is non-negative if n is even.
2828         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2829           return true;
2830       }
2831       // TODO: This is not correct.  Given that exp is an integer, here are the
2832       // ways that pow can return a negative value:
2833       //
2834       //   pow(x, exp)    --> negative if exp is odd and x is negative.
2835       //   pow(-0, exp)   --> -inf if exp is negative odd.
2836       //   pow(-0, exp)   --> -0 if exp is positive odd.
2837       //   pow(-inf, exp) --> -0 if exp is negative odd.
2838       //   pow(-inf, exp) --> -inf if exp is positive odd.
2839       //
2840       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2841       // but we must return false if x == -0.  Unfortunately we do not currently
2842       // have a way of expressing this constraint.  See details in
2843       // https://llvm.org/bugs/show_bug.cgi?id=31702.
2844       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2845                                              Depth + 1);
2846 
2847     case Intrinsic::fma:
2848     case Intrinsic::fmuladd:
2849       // x*x+y is non-negative if y is non-negative.
2850       return I->getOperand(0) == I->getOperand(1) &&
2851              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2852              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2853                                              Depth + 1);
2854     }
2855     break;
2856   }
2857   return false;
2858 }
2859 
2860 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2861                                        const TargetLibraryInfo *TLI) {
2862   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2863 }
2864 
2865 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2866   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2867 }
2868 
2869 bool llvm::isKnownNeverNaN(const Value *V) {
2870   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2871 
2872   // If we're told that NaNs won't happen, assume they won't.
2873   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2874     if (FPMathOp->hasNoNaNs())
2875       return true;
2876 
2877   // TODO: Handle instructions and potentially recurse like other 'isKnown'
2878   // functions. For example, the result of sitofp is never NaN.
2879 
2880   // Handle scalar constants.
2881   if (auto *CFP = dyn_cast<ConstantFP>(V))
2882     return !CFP->isNaN();
2883 
2884   // Bail out for constant expressions, but try to handle vector constants.
2885   if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2886     return false;
2887 
2888   // For vectors, verify that each element is not NaN.
2889   unsigned NumElts = V->getType()->getVectorNumElements();
2890   for (unsigned i = 0; i != NumElts; ++i) {
2891     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2892     if (!Elt)
2893       return false;
2894     if (isa<UndefValue>(Elt))
2895       continue;
2896     auto *CElt = dyn_cast<ConstantFP>(Elt);
2897     if (!CElt || CElt->isNaN())
2898       return false;
2899   }
2900   // All elements were confirmed not-NaN or undefined.
2901   return true;
2902 }
2903 
2904 /// If the specified value can be set by repeating the same byte in memory,
2905 /// return the i8 value that it is represented with.  This is
2906 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2907 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2908 /// byte store (e.g. i16 0x1234), return null.
2909 Value *llvm::isBytewiseValue(Value *V) {
2910   // All byte-wide stores are splatable, even of arbitrary variables.
2911   if (V->getType()->isIntegerTy(8)) return V;
2912 
2913   // Handle 'null' ConstantArrayZero etc.
2914   if (Constant *C = dyn_cast<Constant>(V))
2915     if (C->isNullValue())
2916       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2917 
2918   // Constant float and double values can be handled as integer values if the
2919   // corresponding integer value is "byteable".  An important case is 0.0.
2920   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2921     if (CFP->getType()->isFloatTy())
2922       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2923     if (CFP->getType()->isDoubleTy())
2924       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2925     // Don't handle long double formats, which have strange constraints.
2926   }
2927 
2928   // We can handle constant integers that are multiple of 8 bits.
2929   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2930     if (CI->getBitWidth() % 8 == 0) {
2931       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2932 
2933       if (!CI->getValue().isSplat(8))
2934         return nullptr;
2935       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2936     }
2937   }
2938 
2939   // A ConstantDataArray/Vector is splatable if all its members are equal and
2940   // also splatable.
2941   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2942     Value *Elt = CA->getElementAsConstant(0);
2943     Value *Val = isBytewiseValue(Elt);
2944     if (!Val)
2945       return nullptr;
2946 
2947     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2948       if (CA->getElementAsConstant(I) != Elt)
2949         return nullptr;
2950 
2951     return Val;
2952   }
2953 
2954   // Conceptually, we could handle things like:
2955   //   %a = zext i8 %X to i16
2956   //   %b = shl i16 %a, 8
2957   //   %c = or i16 %a, %b
2958   // but until there is an example that actually needs this, it doesn't seem
2959   // worth worrying about.
2960   return nullptr;
2961 }
2962 
2963 // This is the recursive version of BuildSubAggregate. It takes a few different
2964 // arguments. Idxs is the index within the nested struct From that we are
2965 // looking at now (which is of type IndexedType). IdxSkip is the number of
2966 // indices from Idxs that should be left out when inserting into the resulting
2967 // struct. To is the result struct built so far, new insertvalue instructions
2968 // build on that.
2969 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2970                                 SmallVectorImpl<unsigned> &Idxs,
2971                                 unsigned IdxSkip,
2972                                 Instruction *InsertBefore) {
2973   StructType *STy = dyn_cast<StructType>(IndexedType);
2974   if (STy) {
2975     // Save the original To argument so we can modify it
2976     Value *OrigTo = To;
2977     // General case, the type indexed by Idxs is a struct
2978     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2979       // Process each struct element recursively
2980       Idxs.push_back(i);
2981       Value *PrevTo = To;
2982       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2983                              InsertBefore);
2984       Idxs.pop_back();
2985       if (!To) {
2986         // Couldn't find any inserted value for this index? Cleanup
2987         while (PrevTo != OrigTo) {
2988           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2989           PrevTo = Del->getAggregateOperand();
2990           Del->eraseFromParent();
2991         }
2992         // Stop processing elements
2993         break;
2994       }
2995     }
2996     // If we successfully found a value for each of our subaggregates
2997     if (To)
2998       return To;
2999   }
3000   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3001   // the struct's elements had a value that was inserted directly. In the latter
3002   // case, perhaps we can't determine each of the subelements individually, but
3003   // we might be able to find the complete struct somewhere.
3004 
3005   // Find the value that is at that particular spot
3006   Value *V = FindInsertedValue(From, Idxs);
3007 
3008   if (!V)
3009     return nullptr;
3010 
3011   // Insert the value in the new (sub) aggregate
3012   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3013                                  "tmp", InsertBefore);
3014 }
3015 
3016 // This helper takes a nested struct and extracts a part of it (which is again a
3017 // struct) into a new value. For example, given the struct:
3018 // { a, { b, { c, d }, e } }
3019 // and the indices "1, 1" this returns
3020 // { c, d }.
3021 //
3022 // It does this by inserting an insertvalue for each element in the resulting
3023 // struct, as opposed to just inserting a single struct. This will only work if
3024 // each of the elements of the substruct are known (ie, inserted into From by an
3025 // insertvalue instruction somewhere).
3026 //
3027 // All inserted insertvalue instructions are inserted before InsertBefore
3028 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3029                                 Instruction *InsertBefore) {
3030   assert(InsertBefore && "Must have someplace to insert!");
3031   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3032                                                              idx_range);
3033   Value *To = UndefValue::get(IndexedType);
3034   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3035   unsigned IdxSkip = Idxs.size();
3036 
3037   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3038 }
3039 
3040 /// Given an aggregate and a sequence of indices, see if the scalar value
3041 /// indexed is already around as a register, for example if it was inserted
3042 /// directly into the aggregate.
3043 ///
3044 /// If InsertBefore is not null, this function will duplicate (modified)
3045 /// insertvalues when a part of a nested struct is extracted.
3046 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3047                                Instruction *InsertBefore) {
3048   // Nothing to index? Just return V then (this is useful at the end of our
3049   // recursion).
3050   if (idx_range.empty())
3051     return V;
3052   // We have indices, so V should have an indexable type.
3053   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3054          "Not looking at a struct or array?");
3055   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3056          "Invalid indices for type?");
3057 
3058   if (Constant *C = dyn_cast<Constant>(V)) {
3059     C = C->getAggregateElement(idx_range[0]);
3060     if (!C) return nullptr;
3061     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3062   }
3063 
3064   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3065     // Loop the indices for the insertvalue instruction in parallel with the
3066     // requested indices
3067     const unsigned *req_idx = idx_range.begin();
3068     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3069          i != e; ++i, ++req_idx) {
3070       if (req_idx == idx_range.end()) {
3071         // We can't handle this without inserting insertvalues
3072         if (!InsertBefore)
3073           return nullptr;
3074 
3075         // The requested index identifies a part of a nested aggregate. Handle
3076         // this specially. For example,
3077         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3078         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3079         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3080         // This can be changed into
3081         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3082         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3083         // which allows the unused 0,0 element from the nested struct to be
3084         // removed.
3085         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3086                                  InsertBefore);
3087       }
3088 
3089       // This insert value inserts something else than what we are looking for.
3090       // See if the (aggregate) value inserted into has the value we are
3091       // looking for, then.
3092       if (*req_idx != *i)
3093         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3094                                  InsertBefore);
3095     }
3096     // If we end up here, the indices of the insertvalue match with those
3097     // requested (though possibly only partially). Now we recursively look at
3098     // the inserted value, passing any remaining indices.
3099     return FindInsertedValue(I->getInsertedValueOperand(),
3100                              makeArrayRef(req_idx, idx_range.end()),
3101                              InsertBefore);
3102   }
3103 
3104   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3105     // If we're extracting a value from an aggregate that was extracted from
3106     // something else, we can extract from that something else directly instead.
3107     // However, we will need to chain I's indices with the requested indices.
3108 
3109     // Calculate the number of indices required
3110     unsigned size = I->getNumIndices() + idx_range.size();
3111     // Allocate some space to put the new indices in
3112     SmallVector<unsigned, 5> Idxs;
3113     Idxs.reserve(size);
3114     // Add indices from the extract value instruction
3115     Idxs.append(I->idx_begin(), I->idx_end());
3116 
3117     // Add requested indices
3118     Idxs.append(idx_range.begin(), idx_range.end());
3119 
3120     assert(Idxs.size() == size
3121            && "Number of indices added not correct?");
3122 
3123     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3124   }
3125   // Otherwise, we don't know (such as, extracting from a function return value
3126   // or load instruction)
3127   return nullptr;
3128 }
3129 
3130 /// Analyze the specified pointer to see if it can be expressed as a base
3131 /// pointer plus a constant offset. Return the base and offset to the caller.
3132 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3133                                               const DataLayout &DL) {
3134   unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3135   APInt ByteOffset(BitWidth, 0);
3136 
3137   // We walk up the defs but use a visited set to handle unreachable code. In
3138   // that case, we stop after accumulating the cycle once (not that it
3139   // matters).
3140   SmallPtrSet<Value *, 16> Visited;
3141   while (Visited.insert(Ptr).second) {
3142     if (Ptr->getType()->isVectorTy())
3143       break;
3144 
3145     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3146       // If one of the values we have visited is an addrspacecast, then
3147       // the pointer type of this GEP may be different from the type
3148       // of the Ptr parameter which was passed to this function.  This
3149       // means when we construct GEPOffset, we need to use the size
3150       // of GEP's pointer type rather than the size of the original
3151       // pointer type.
3152       APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3153       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3154         break;
3155 
3156       ByteOffset += GEPOffset.getSExtValue();
3157 
3158       Ptr = GEP->getPointerOperand();
3159     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3160                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3161       Ptr = cast<Operator>(Ptr)->getOperand(0);
3162     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3163       if (GA->isInterposable())
3164         break;
3165       Ptr = GA->getAliasee();
3166     } else {
3167       break;
3168     }
3169   }
3170   Offset = ByteOffset.getSExtValue();
3171   return Ptr;
3172 }
3173 
3174 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3175                                        unsigned CharSize) {
3176   // Make sure the GEP has exactly three arguments.
3177   if (GEP->getNumOperands() != 3)
3178     return false;
3179 
3180   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3181   // CharSize.
3182   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3183   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3184     return false;
3185 
3186   // Check to make sure that the first operand of the GEP is an integer and
3187   // has value 0 so that we are sure we're indexing into the initializer.
3188   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3189   if (!FirstIdx || !FirstIdx->isZero())
3190     return false;
3191 
3192   return true;
3193 }
3194 
3195 bool llvm::getConstantDataArrayInfo(const Value *V,
3196                                     ConstantDataArraySlice &Slice,
3197                                     unsigned ElementSize, uint64_t Offset) {
3198   assert(V);
3199 
3200   // Look through bitcast instructions and geps.
3201   V = V->stripPointerCasts();
3202 
3203   // If the value is a GEP instruction or constant expression, treat it as an
3204   // offset.
3205   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3206     // The GEP operator should be based on a pointer to string constant, and is
3207     // indexing into the string constant.
3208     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3209       return false;
3210 
3211     // If the second index isn't a ConstantInt, then this is a variable index
3212     // into the array.  If this occurs, we can't say anything meaningful about
3213     // the string.
3214     uint64_t StartIdx = 0;
3215     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3216       StartIdx = CI->getZExtValue();
3217     else
3218       return false;
3219     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3220                                     StartIdx + Offset);
3221   }
3222 
3223   // The GEP instruction, constant or instruction, must reference a global
3224   // variable that is a constant and is initialized. The referenced constant
3225   // initializer is the array that we'll use for optimization.
3226   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3227   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3228     return false;
3229 
3230   const ConstantDataArray *Array;
3231   ArrayType *ArrayTy;
3232   if (GV->getInitializer()->isNullValue()) {
3233     Type *GVTy = GV->getValueType();
3234     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3235       // A zeroinitializer for the array; there is no ConstantDataArray.
3236       Array = nullptr;
3237     } else {
3238       const DataLayout &DL = GV->getParent()->getDataLayout();
3239       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3240       uint64_t Length = SizeInBytes / (ElementSize / 8);
3241       if (Length <= Offset)
3242         return false;
3243 
3244       Slice.Array = nullptr;
3245       Slice.Offset = 0;
3246       Slice.Length = Length - Offset;
3247       return true;
3248     }
3249   } else {
3250     // This must be a ConstantDataArray.
3251     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3252     if (!Array)
3253       return false;
3254     ArrayTy = Array->getType();
3255   }
3256   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3257     return false;
3258 
3259   uint64_t NumElts = ArrayTy->getArrayNumElements();
3260   if (Offset > NumElts)
3261     return false;
3262 
3263   Slice.Array = Array;
3264   Slice.Offset = Offset;
3265   Slice.Length = NumElts - Offset;
3266   return true;
3267 }
3268 
3269 /// This function computes the length of a null-terminated C string pointed to
3270 /// by V. If successful, it returns true and returns the string in Str.
3271 /// If unsuccessful, it returns false.
3272 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3273                                  uint64_t Offset, bool TrimAtNul) {
3274   ConstantDataArraySlice Slice;
3275   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3276     return false;
3277 
3278   if (Slice.Array == nullptr) {
3279     if (TrimAtNul) {
3280       Str = StringRef();
3281       return true;
3282     }
3283     if (Slice.Length == 1) {
3284       Str = StringRef("", 1);
3285       return true;
3286     }
3287     // We cannot instantiate a StringRef as we do not have an appropriate string
3288     // of 0s at hand.
3289     return false;
3290   }
3291 
3292   // Start out with the entire array in the StringRef.
3293   Str = Slice.Array->getAsString();
3294   // Skip over 'offset' bytes.
3295   Str = Str.substr(Slice.Offset);
3296 
3297   if (TrimAtNul) {
3298     // Trim off the \0 and anything after it.  If the array is not nul
3299     // terminated, we just return the whole end of string.  The client may know
3300     // some other way that the string is length-bound.
3301     Str = Str.substr(0, Str.find('\0'));
3302   }
3303   return true;
3304 }
3305 
3306 // These next two are very similar to the above, but also look through PHI
3307 // nodes.
3308 // TODO: See if we can integrate these two together.
3309 
3310 /// If we can compute the length of the string pointed to by
3311 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3312 static uint64_t GetStringLengthH(const Value *V,
3313                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3314                                  unsigned CharSize) {
3315   // Look through noop bitcast instructions.
3316   V = V->stripPointerCasts();
3317 
3318   // If this is a PHI node, there are two cases: either we have already seen it
3319   // or we haven't.
3320   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3321     if (!PHIs.insert(PN).second)
3322       return ~0ULL;  // already in the set.
3323 
3324     // If it was new, see if all the input strings are the same length.
3325     uint64_t LenSoFar = ~0ULL;
3326     for (Value *IncValue : PN->incoming_values()) {
3327       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3328       if (Len == 0) return 0; // Unknown length -> unknown.
3329 
3330       if (Len == ~0ULL) continue;
3331 
3332       if (Len != LenSoFar && LenSoFar != ~0ULL)
3333         return 0;    // Disagree -> unknown.
3334       LenSoFar = Len;
3335     }
3336 
3337     // Success, all agree.
3338     return LenSoFar;
3339   }
3340 
3341   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3342   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3343     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3344     if (Len1 == 0) return 0;
3345     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3346     if (Len2 == 0) return 0;
3347     if (Len1 == ~0ULL) return Len2;
3348     if (Len2 == ~0ULL) return Len1;
3349     if (Len1 != Len2) return 0;
3350     return Len1;
3351   }
3352 
3353   // Otherwise, see if we can read the string.
3354   ConstantDataArraySlice Slice;
3355   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3356     return 0;
3357 
3358   if (Slice.Array == nullptr)
3359     return 1;
3360 
3361   // Search for nul characters
3362   unsigned NullIndex = 0;
3363   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3364     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3365       break;
3366   }
3367 
3368   return NullIndex + 1;
3369 }
3370 
3371 /// If we can compute the length of the string pointed to by
3372 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3373 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3374   if (!V->getType()->isPointerTy()) return 0;
3375 
3376   SmallPtrSet<const PHINode*, 32> PHIs;
3377   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3378   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3379   // an empty string as a length.
3380   return Len == ~0ULL ? 1 : Len;
3381 }
3382 
3383 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
3384 /// previous iteration of the loop was referring to the same object as \p PN.
3385 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3386                                          const LoopInfo *LI) {
3387   // Find the loop-defined value.
3388   Loop *L = LI->getLoopFor(PN->getParent());
3389   if (PN->getNumIncomingValues() != 2)
3390     return true;
3391 
3392   // Find the value from previous iteration.
3393   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3394   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3395     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3396   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3397     return true;
3398 
3399   // If a new pointer is loaded in the loop, the pointer references a different
3400   // object in every iteration.  E.g.:
3401   //    for (i)
3402   //       int *p = a[i];
3403   //       ...
3404   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3405     if (!L->isLoopInvariant(Load->getPointerOperand()))
3406       return false;
3407   return true;
3408 }
3409 
3410 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3411                                  unsigned MaxLookup) {
3412   if (!V->getType()->isPointerTy())
3413     return V;
3414   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3415     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3416       V = GEP->getPointerOperand();
3417     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3418                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3419       V = cast<Operator>(V)->getOperand(0);
3420     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3421       if (GA->isInterposable())
3422         return V;
3423       V = GA->getAliasee();
3424     } else if (isa<AllocaInst>(V)) {
3425       // An alloca can't be further simplified.
3426       return V;
3427     } else {
3428       if (auto CS = CallSite(V))
3429         if (Value *RV = CS.getReturnedArgOperand()) {
3430           V = RV;
3431           continue;
3432         }
3433 
3434       // See if InstructionSimplify knows any relevant tricks.
3435       if (Instruction *I = dyn_cast<Instruction>(V))
3436         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3437         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3438           V = Simplified;
3439           continue;
3440         }
3441 
3442       return V;
3443     }
3444     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3445   }
3446   return V;
3447 }
3448 
3449 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3450                                 const DataLayout &DL, LoopInfo *LI,
3451                                 unsigned MaxLookup) {
3452   SmallPtrSet<Value *, 4> Visited;
3453   SmallVector<Value *, 4> Worklist;
3454   Worklist.push_back(V);
3455   do {
3456     Value *P = Worklist.pop_back_val();
3457     P = GetUnderlyingObject(P, DL, MaxLookup);
3458 
3459     if (!Visited.insert(P).second)
3460       continue;
3461 
3462     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3463       Worklist.push_back(SI->getTrueValue());
3464       Worklist.push_back(SI->getFalseValue());
3465       continue;
3466     }
3467 
3468     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3469       // If this PHI changes the underlying object in every iteration of the
3470       // loop, don't look through it.  Consider:
3471       //   int **A;
3472       //   for (i) {
3473       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3474       //     Curr = A[i];
3475       //     *Prev, *Curr;
3476       //
3477       // Prev is tracking Curr one iteration behind so they refer to different
3478       // underlying objects.
3479       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3480           isSameUnderlyingObjectInLoop(PN, LI))
3481         for (Value *IncValue : PN->incoming_values())
3482           Worklist.push_back(IncValue);
3483       continue;
3484     }
3485 
3486     Objects.push_back(P);
3487   } while (!Worklist.empty());
3488 }
3489 
3490 /// This is the function that does the work of looking through basic
3491 /// ptrtoint+arithmetic+inttoptr sequences.
3492 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3493   do {
3494     if (const Operator *U = dyn_cast<Operator>(V)) {
3495       // If we find a ptrtoint, we can transfer control back to the
3496       // regular getUnderlyingObjectFromInt.
3497       if (U->getOpcode() == Instruction::PtrToInt)
3498         return U->getOperand(0);
3499       // If we find an add of a constant, a multiplied value, or a phi, it's
3500       // likely that the other operand will lead us to the base
3501       // object. We don't have to worry about the case where the
3502       // object address is somehow being computed by the multiply,
3503       // because our callers only care when the result is an
3504       // identifiable object.
3505       if (U->getOpcode() != Instruction::Add ||
3506           (!isa<ConstantInt>(U->getOperand(1)) &&
3507            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3508            !isa<PHINode>(U->getOperand(1))))
3509         return V;
3510       V = U->getOperand(0);
3511     } else {
3512       return V;
3513     }
3514     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3515   } while (true);
3516 }
3517 
3518 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3519 /// ptrtoint+arithmetic+inttoptr sequences.
3520 /// It returns false if unidentified object is found in GetUnderlyingObjects.
3521 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3522                           SmallVectorImpl<Value *> &Objects,
3523                           const DataLayout &DL) {
3524   SmallPtrSet<const Value *, 16> Visited;
3525   SmallVector<const Value *, 4> Working(1, V);
3526   do {
3527     V = Working.pop_back_val();
3528 
3529     SmallVector<Value *, 4> Objs;
3530     GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3531 
3532     for (Value *V : Objs) {
3533       if (!Visited.insert(V).second)
3534         continue;
3535       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3536         const Value *O =
3537           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3538         if (O->getType()->isPointerTy()) {
3539           Working.push_back(O);
3540           continue;
3541         }
3542       }
3543       // If GetUnderlyingObjects fails to find an identifiable object,
3544       // getUnderlyingObjectsForCodeGen also fails for safety.
3545       if (!isIdentifiedObject(V)) {
3546         Objects.clear();
3547         return false;
3548       }
3549       Objects.push_back(const_cast<Value *>(V));
3550     }
3551   } while (!Working.empty());
3552   return true;
3553 }
3554 
3555 /// Return true if the only users of this pointer are lifetime markers.
3556 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3557   for (const User *U : V->users()) {
3558     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3559     if (!II) return false;
3560 
3561     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3562         II->getIntrinsicID() != Intrinsic::lifetime_end)
3563       return false;
3564   }
3565   return true;
3566 }
3567 
3568 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3569                                         const Instruction *CtxI,
3570                                         const DominatorTree *DT) {
3571   const Operator *Inst = dyn_cast<Operator>(V);
3572   if (!Inst)
3573     return false;
3574 
3575   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3576     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3577       if (C->canTrap())
3578         return false;
3579 
3580   switch (Inst->getOpcode()) {
3581   default:
3582     return true;
3583   case Instruction::UDiv:
3584   case Instruction::URem: {
3585     // x / y is undefined if y == 0.
3586     const APInt *V;
3587     if (match(Inst->getOperand(1), m_APInt(V)))
3588       return *V != 0;
3589     return false;
3590   }
3591   case Instruction::SDiv:
3592   case Instruction::SRem: {
3593     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3594     const APInt *Numerator, *Denominator;
3595     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3596       return false;
3597     // We cannot hoist this division if the denominator is 0.
3598     if (*Denominator == 0)
3599       return false;
3600     // It's safe to hoist if the denominator is not 0 or -1.
3601     if (*Denominator != -1)
3602       return true;
3603     // At this point we know that the denominator is -1.  It is safe to hoist as
3604     // long we know that the numerator is not INT_MIN.
3605     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3606       return !Numerator->isMinSignedValue();
3607     // The numerator *might* be MinSignedValue.
3608     return false;
3609   }
3610   case Instruction::Load: {
3611     const LoadInst *LI = cast<LoadInst>(Inst);
3612     if (!LI->isUnordered() ||
3613         // Speculative load may create a race that did not exist in the source.
3614         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3615         // Speculative load may load data from dirty regions.
3616         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3617         LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3618       return false;
3619     const DataLayout &DL = LI->getModule()->getDataLayout();
3620     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3621                                               LI->getAlignment(), DL, CtxI, DT);
3622   }
3623   case Instruction::Call: {
3624     auto *CI = cast<const CallInst>(Inst);
3625     const Function *Callee = CI->getCalledFunction();
3626 
3627     // The called function could have undefined behavior or side-effects, even
3628     // if marked readnone nounwind.
3629     return Callee && Callee->isSpeculatable();
3630   }
3631   case Instruction::VAArg:
3632   case Instruction::Alloca:
3633   case Instruction::Invoke:
3634   case Instruction::PHI:
3635   case Instruction::Store:
3636   case Instruction::Ret:
3637   case Instruction::Br:
3638   case Instruction::IndirectBr:
3639   case Instruction::Switch:
3640   case Instruction::Unreachable:
3641   case Instruction::Fence:
3642   case Instruction::AtomicRMW:
3643   case Instruction::AtomicCmpXchg:
3644   case Instruction::LandingPad:
3645   case Instruction::Resume:
3646   case Instruction::CatchSwitch:
3647   case Instruction::CatchPad:
3648   case Instruction::CatchRet:
3649   case Instruction::CleanupPad:
3650   case Instruction::CleanupRet:
3651     return false; // Misc instructions which have effects
3652   }
3653 }
3654 
3655 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3656   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3657 }
3658 
3659 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3660                                                    const Value *RHS,
3661                                                    const DataLayout &DL,
3662                                                    AssumptionCache *AC,
3663                                                    const Instruction *CxtI,
3664                                                    const DominatorTree *DT) {
3665   // Multiplying n * m significant bits yields a result of n + m significant
3666   // bits. If the total number of significant bits does not exceed the
3667   // result bit width (minus 1), there is no overflow.
3668   // This means if we have enough leading zero bits in the operands
3669   // we can guarantee that the result does not overflow.
3670   // Ref: "Hacker's Delight" by Henry Warren
3671   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3672   KnownBits LHSKnown(BitWidth);
3673   KnownBits RHSKnown(BitWidth);
3674   computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3675   computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3676   // Note that underestimating the number of zero bits gives a more
3677   // conservative answer.
3678   unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3679                       RHSKnown.countMinLeadingZeros();
3680   // First handle the easy case: if we have enough zero bits there's
3681   // definitely no overflow.
3682   if (ZeroBits >= BitWidth)
3683     return OverflowResult::NeverOverflows;
3684 
3685   // Get the largest possible values for each operand.
3686   APInt LHSMax = ~LHSKnown.Zero;
3687   APInt RHSMax = ~RHSKnown.Zero;
3688 
3689   // We know the multiply operation doesn't overflow if the maximum values for
3690   // each operand will not overflow after we multiply them together.
3691   bool MaxOverflow;
3692   (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3693   if (!MaxOverflow)
3694     return OverflowResult::NeverOverflows;
3695 
3696   // We know it always overflows if multiplying the smallest possible values for
3697   // the operands also results in overflow.
3698   bool MinOverflow;
3699   (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3700   if (MinOverflow)
3701     return OverflowResult::AlwaysOverflows;
3702 
3703   return OverflowResult::MayOverflow;
3704 }
3705 
3706 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3707                                                    const Value *RHS,
3708                                                    const DataLayout &DL,
3709                                                    AssumptionCache *AC,
3710                                                    const Instruction *CxtI,
3711                                                    const DominatorTree *DT) {
3712   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3713   if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3714     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3715 
3716     if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3717       // The sign bit is set in both cases: this MUST overflow.
3718       // Create a simple add instruction, and insert it into the struct.
3719       return OverflowResult::AlwaysOverflows;
3720     }
3721 
3722     if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3723       // The sign bit is clear in both cases: this CANNOT overflow.
3724       // Create a simple add instruction, and insert it into the struct.
3725       return OverflowResult::NeverOverflows;
3726     }
3727   }
3728 
3729   return OverflowResult::MayOverflow;
3730 }
3731 
3732 /// \brief Return true if we can prove that adding the two values of the
3733 /// knownbits will not overflow.
3734 /// Otherwise return false.
3735 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3736                                     const KnownBits &RHSKnown) {
3737   // Addition of two 2's complement numbers having opposite signs will never
3738   // overflow.
3739   if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3740       (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3741     return true;
3742 
3743   // If either of the values is known to be non-negative, adding them can only
3744   // overflow if the second is also non-negative, so we can assume that.
3745   // Two non-negative numbers will only overflow if there is a carry to the
3746   // sign bit, so we can check if even when the values are as big as possible
3747   // there is no overflow to the sign bit.
3748   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3749     APInt MaxLHS = ~LHSKnown.Zero;
3750     MaxLHS.clearSignBit();
3751     APInt MaxRHS = ~RHSKnown.Zero;
3752     MaxRHS.clearSignBit();
3753     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3754     return Result.isSignBitClear();
3755   }
3756 
3757   // If either of the values is known to be negative, adding them can only
3758   // overflow if the second is also negative, so we can assume that.
3759   // Two negative number will only overflow if there is no carry to the sign
3760   // bit, so we can check if even when the values are as small as possible
3761   // there is overflow to the sign bit.
3762   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3763     APInt MinLHS = LHSKnown.One;
3764     MinLHS.clearSignBit();
3765     APInt MinRHS = RHSKnown.One;
3766     MinRHS.clearSignBit();
3767     APInt Result = std::move(MinLHS) + std::move(MinRHS);
3768     return Result.isSignBitSet();
3769   }
3770 
3771   // If we reached here it means that we know nothing about the sign bits.
3772   // In this case we can't know if there will be an overflow, since by
3773   // changing the sign bits any two values can be made to overflow.
3774   return false;
3775 }
3776 
3777 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3778                                                   const Value *RHS,
3779                                                   const AddOperator *Add,
3780                                                   const DataLayout &DL,
3781                                                   AssumptionCache *AC,
3782                                                   const Instruction *CxtI,
3783                                                   const DominatorTree *DT) {
3784   if (Add && Add->hasNoSignedWrap()) {
3785     return OverflowResult::NeverOverflows;
3786   }
3787 
3788   // If LHS and RHS each have at least two sign bits, the addition will look
3789   // like
3790   //
3791   // XX..... +
3792   // YY.....
3793   //
3794   // If the carry into the most significant position is 0, X and Y can't both
3795   // be 1 and therefore the carry out of the addition is also 0.
3796   //
3797   // If the carry into the most significant position is 1, X and Y can't both
3798   // be 0 and therefore the carry out of the addition is also 1.
3799   //
3800   // Since the carry into the most significant position is always equal to
3801   // the carry out of the addition, there is no signed overflow.
3802   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3803       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3804     return OverflowResult::NeverOverflows;
3805 
3806   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3807   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3808 
3809   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3810     return OverflowResult::NeverOverflows;
3811 
3812   // The remaining code needs Add to be available. Early returns if not so.
3813   if (!Add)
3814     return OverflowResult::MayOverflow;
3815 
3816   // If the sign of Add is the same as at least one of the operands, this add
3817   // CANNOT overflow. This is particularly useful when the sum is
3818   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3819   // operands.
3820   bool LHSOrRHSKnownNonNegative =
3821       (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3822   bool LHSOrRHSKnownNegative =
3823       (LHSKnown.isNegative() || RHSKnown.isNegative());
3824   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3825     KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3826     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3827         (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3828       return OverflowResult::NeverOverflows;
3829     }
3830   }
3831 
3832   return OverflowResult::MayOverflow;
3833 }
3834 
3835 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3836                                      const DominatorTree &DT) {
3837 #ifndef NDEBUG
3838   auto IID = II->getIntrinsicID();
3839   assert((IID == Intrinsic::sadd_with_overflow ||
3840           IID == Intrinsic::uadd_with_overflow ||
3841           IID == Intrinsic::ssub_with_overflow ||
3842           IID == Intrinsic::usub_with_overflow ||
3843           IID == Intrinsic::smul_with_overflow ||
3844           IID == Intrinsic::umul_with_overflow) &&
3845          "Not an overflow intrinsic!");
3846 #endif
3847 
3848   SmallVector<const BranchInst *, 2> GuardingBranches;
3849   SmallVector<const ExtractValueInst *, 2> Results;
3850 
3851   for (const User *U : II->users()) {
3852     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3853       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3854 
3855       if (EVI->getIndices()[0] == 0)
3856         Results.push_back(EVI);
3857       else {
3858         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3859 
3860         for (const auto *U : EVI->users())
3861           if (const auto *B = dyn_cast<BranchInst>(U)) {
3862             assert(B->isConditional() && "How else is it using an i1?");
3863             GuardingBranches.push_back(B);
3864           }
3865       }
3866     } else {
3867       // We are using the aggregate directly in a way we don't want to analyze
3868       // here (storing it to a global, say).
3869       return false;
3870     }
3871   }
3872 
3873   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3874     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3875     if (!NoWrapEdge.isSingleEdge())
3876       return false;
3877 
3878     // Check if all users of the add are provably no-wrap.
3879     for (const auto *Result : Results) {
3880       // If the extractvalue itself is not executed on overflow, the we don't
3881       // need to check each use separately, since domination is transitive.
3882       if (DT.dominates(NoWrapEdge, Result->getParent()))
3883         continue;
3884 
3885       for (auto &RU : Result->uses())
3886         if (!DT.dominates(NoWrapEdge, RU))
3887           return false;
3888     }
3889 
3890     return true;
3891   };
3892 
3893   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
3894 }
3895 
3896 
3897 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3898                                                  const DataLayout &DL,
3899                                                  AssumptionCache *AC,
3900                                                  const Instruction *CxtI,
3901                                                  const DominatorTree *DT) {
3902   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3903                                        Add, DL, AC, CxtI, DT);
3904 }
3905 
3906 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3907                                                  const Value *RHS,
3908                                                  const DataLayout &DL,
3909                                                  AssumptionCache *AC,
3910                                                  const Instruction *CxtI,
3911                                                  const DominatorTree *DT) {
3912   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3913 }
3914 
3915 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3916   // A memory operation returns normally if it isn't volatile. A volatile
3917   // operation is allowed to trap.
3918   //
3919   // An atomic operation isn't guaranteed to return in a reasonable amount of
3920   // time because it's possible for another thread to interfere with it for an
3921   // arbitrary length of time, but programs aren't allowed to rely on that.
3922   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3923     return !LI->isVolatile();
3924   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3925     return !SI->isVolatile();
3926   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3927     return !CXI->isVolatile();
3928   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3929     return !RMWI->isVolatile();
3930   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3931     return !MII->isVolatile();
3932 
3933   // If there is no successor, then execution can't transfer to it.
3934   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3935     return !CRI->unwindsToCaller();
3936   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3937     return !CatchSwitch->unwindsToCaller();
3938   if (isa<ResumeInst>(I))
3939     return false;
3940   if (isa<ReturnInst>(I))
3941     return false;
3942   if (isa<UnreachableInst>(I))
3943     return false;
3944 
3945   // Calls can throw, or contain an infinite loop, or kill the process.
3946   if (auto CS = ImmutableCallSite(I)) {
3947     // Call sites that throw have implicit non-local control flow.
3948     if (!CS.doesNotThrow())
3949       return false;
3950 
3951     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3952     // etc. and thus not return.  However, LLVM already assumes that
3953     //
3954     //  - Thread exiting actions are modeled as writes to memory invisible to
3955     //    the program.
3956     //
3957     //  - Loops that don't have side effects (side effects are volatile/atomic
3958     //    stores and IO) always terminate (see http://llvm.org/PR965).
3959     //    Furthermore IO itself is also modeled as writes to memory invisible to
3960     //    the program.
3961     //
3962     // We rely on those assumptions here, and use the memory effects of the call
3963     // target as a proxy for checking that it always returns.
3964 
3965     // FIXME: This isn't aggressive enough; a call which only writes to a global
3966     // is guaranteed to return.
3967     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3968            match(I, m_Intrinsic<Intrinsic::assume>()) ||
3969            match(I, m_Intrinsic<Intrinsic::sideeffect>());
3970   }
3971 
3972   // Other instructions return normally.
3973   return true;
3974 }
3975 
3976 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
3977   // TODO: This is slightly consdervative for invoke instruction since exiting
3978   // via an exception *is* normal control for them.
3979   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
3980     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
3981       return false;
3982   return true;
3983 }
3984 
3985 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3986                                                   const Loop *L) {
3987   // The loop header is guaranteed to be executed for every iteration.
3988   //
3989   // FIXME: Relax this constraint to cover all basic blocks that are
3990   // guaranteed to be executed at every iteration.
3991   if (I->getParent() != L->getHeader()) return false;
3992 
3993   for (const Instruction &LI : *L->getHeader()) {
3994     if (&LI == I) return true;
3995     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3996   }
3997   llvm_unreachable("Instruction not contained in its own parent basic block.");
3998 }
3999 
4000 bool llvm::propagatesFullPoison(const Instruction *I) {
4001   switch (I->getOpcode()) {
4002   case Instruction::Add:
4003   case Instruction::Sub:
4004   case Instruction::Xor:
4005   case Instruction::Trunc:
4006   case Instruction::BitCast:
4007   case Instruction::AddrSpaceCast:
4008   case Instruction::Mul:
4009   case Instruction::Shl:
4010   case Instruction::GetElementPtr:
4011     // These operations all propagate poison unconditionally. Note that poison
4012     // is not any particular value, so xor or subtraction of poison with
4013     // itself still yields poison, not zero.
4014     return true;
4015 
4016   case Instruction::AShr:
4017   case Instruction::SExt:
4018     // For these operations, one bit of the input is replicated across
4019     // multiple output bits. A replicated poison bit is still poison.
4020     return true;
4021 
4022   case Instruction::ICmp:
4023     // Comparing poison with any value yields poison.  This is why, for
4024     // instance, x s< (x +nsw 1) can be folded to true.
4025     return true;
4026 
4027   default:
4028     return false;
4029   }
4030 }
4031 
4032 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4033   switch (I->getOpcode()) {
4034     case Instruction::Store:
4035       return cast<StoreInst>(I)->getPointerOperand();
4036 
4037     case Instruction::Load:
4038       return cast<LoadInst>(I)->getPointerOperand();
4039 
4040     case Instruction::AtomicCmpXchg:
4041       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4042 
4043     case Instruction::AtomicRMW:
4044       return cast<AtomicRMWInst>(I)->getPointerOperand();
4045 
4046     case Instruction::UDiv:
4047     case Instruction::SDiv:
4048     case Instruction::URem:
4049     case Instruction::SRem:
4050       return I->getOperand(1);
4051 
4052     default:
4053       return nullptr;
4054   }
4055 }
4056 
4057 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4058   // We currently only look for uses of poison values within the same basic
4059   // block, as that makes it easier to guarantee that the uses will be
4060   // executed given that PoisonI is executed.
4061   //
4062   // FIXME: Expand this to consider uses beyond the same basic block. To do
4063   // this, look out for the distinction between post-dominance and strong
4064   // post-dominance.
4065   const BasicBlock *BB = PoisonI->getParent();
4066 
4067   // Set of instructions that we have proved will yield poison if PoisonI
4068   // does.
4069   SmallSet<const Value *, 16> YieldsPoison;
4070   SmallSet<const BasicBlock *, 4> Visited;
4071   YieldsPoison.insert(PoisonI);
4072   Visited.insert(PoisonI->getParent());
4073 
4074   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4075 
4076   unsigned Iter = 0;
4077   while (Iter++ < MaxDepth) {
4078     for (auto &I : make_range(Begin, End)) {
4079       if (&I != PoisonI) {
4080         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4081         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4082           return true;
4083         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4084           return false;
4085       }
4086 
4087       // Mark poison that propagates from I through uses of I.
4088       if (YieldsPoison.count(&I)) {
4089         for (const User *User : I.users()) {
4090           const Instruction *UserI = cast<Instruction>(User);
4091           if (propagatesFullPoison(UserI))
4092             YieldsPoison.insert(User);
4093         }
4094       }
4095     }
4096 
4097     if (auto *NextBB = BB->getSingleSuccessor()) {
4098       if (Visited.insert(NextBB).second) {
4099         BB = NextBB;
4100         Begin = BB->getFirstNonPHI()->getIterator();
4101         End = BB->end();
4102         continue;
4103       }
4104     }
4105 
4106     break;
4107   }
4108   return false;
4109 }
4110 
4111 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4112   if (FMF.noNaNs())
4113     return true;
4114 
4115   if (auto *C = dyn_cast<ConstantFP>(V))
4116     return !C->isNaN();
4117   return false;
4118 }
4119 
4120 static bool isKnownNonZero(const Value *V) {
4121   if (auto *C = dyn_cast<ConstantFP>(V))
4122     return !C->isZero();
4123   return false;
4124 }
4125 
4126 /// Match clamp pattern for float types without care about NaNs or signed zeros.
4127 /// Given non-min/max outer cmp/select from the clamp pattern this
4128 /// function recognizes if it can be substitued by a "canonical" min/max
4129 /// pattern.
4130 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4131                                                Value *CmpLHS, Value *CmpRHS,
4132                                                Value *TrueVal, Value *FalseVal,
4133                                                Value *&LHS, Value *&RHS) {
4134   // Try to match
4135   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4136   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4137   // and return description of the outer Max/Min.
4138 
4139   // First, check if select has inverse order:
4140   if (CmpRHS == FalseVal) {
4141     std::swap(TrueVal, FalseVal);
4142     Pred = CmpInst::getInversePredicate(Pred);
4143   }
4144 
4145   // Assume success now. If there's no match, callers should not use these anyway.
4146   LHS = TrueVal;
4147   RHS = FalseVal;
4148 
4149   const APFloat *FC1;
4150   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4151     return {SPF_UNKNOWN, SPNB_NA, false};
4152 
4153   const APFloat *FC2;
4154   switch (Pred) {
4155   case CmpInst::FCMP_OLT:
4156   case CmpInst::FCMP_OLE:
4157   case CmpInst::FCMP_ULT:
4158   case CmpInst::FCMP_ULE:
4159     if (match(FalseVal,
4160               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4161                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4162         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4163       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4164     break;
4165   case CmpInst::FCMP_OGT:
4166   case CmpInst::FCMP_OGE:
4167   case CmpInst::FCMP_UGT:
4168   case CmpInst::FCMP_UGE:
4169     if (match(FalseVal,
4170               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4171                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4172         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4173       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4174     break;
4175   default:
4176     break;
4177   }
4178 
4179   return {SPF_UNKNOWN, SPNB_NA, false};
4180 }
4181 
4182 /// Recognize variations of:
4183 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4184 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4185                                       Value *CmpLHS, Value *CmpRHS,
4186                                       Value *TrueVal, Value *FalseVal) {
4187   // Swap the select operands and predicate to match the patterns below.
4188   if (CmpRHS != TrueVal) {
4189     Pred = ICmpInst::getSwappedPredicate(Pred);
4190     std::swap(TrueVal, FalseVal);
4191   }
4192   const APInt *C1;
4193   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4194     const APInt *C2;
4195     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4196     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4197         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4198       return {SPF_SMAX, SPNB_NA, false};
4199 
4200     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4201     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4202         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4203       return {SPF_SMIN, SPNB_NA, false};
4204 
4205     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4206     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4207         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4208       return {SPF_UMAX, SPNB_NA, false};
4209 
4210     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4211     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4212         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4213       return {SPF_UMIN, SPNB_NA, false};
4214   }
4215   return {SPF_UNKNOWN, SPNB_NA, false};
4216 }
4217 
4218 /// Recognize variations of:
4219 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4220 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4221                                                Value *CmpLHS, Value *CmpRHS,
4222                                                Value *TVal, Value *FVal,
4223                                                unsigned Depth) {
4224   // TODO: Allow FP min/max with nnan/nsz.
4225   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4226 
4227   Value *A, *B;
4228   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4229   if (!SelectPatternResult::isMinOrMax(L.Flavor))
4230     return {SPF_UNKNOWN, SPNB_NA, false};
4231 
4232   Value *C, *D;
4233   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4234   if (L.Flavor != R.Flavor)
4235     return {SPF_UNKNOWN, SPNB_NA, false};
4236 
4237   // We have something like: x Pred y ? min(a, b) : min(c, d).
4238   // Try to match the compare to the min/max operations of the select operands.
4239   // First, make sure we have the right compare predicate.
4240   switch (L.Flavor) {
4241   case SPF_SMIN:
4242     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4243       Pred = ICmpInst::getSwappedPredicate(Pred);
4244       std::swap(CmpLHS, CmpRHS);
4245     }
4246     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4247       break;
4248     return {SPF_UNKNOWN, SPNB_NA, false};
4249   case SPF_SMAX:
4250     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4251       Pred = ICmpInst::getSwappedPredicate(Pred);
4252       std::swap(CmpLHS, CmpRHS);
4253     }
4254     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4255       break;
4256     return {SPF_UNKNOWN, SPNB_NA, false};
4257   case SPF_UMIN:
4258     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4259       Pred = ICmpInst::getSwappedPredicate(Pred);
4260       std::swap(CmpLHS, CmpRHS);
4261     }
4262     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4263       break;
4264     return {SPF_UNKNOWN, SPNB_NA, false};
4265   case SPF_UMAX:
4266     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4267       Pred = ICmpInst::getSwappedPredicate(Pred);
4268       std::swap(CmpLHS, CmpRHS);
4269     }
4270     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4271       break;
4272     return {SPF_UNKNOWN, SPNB_NA, false};
4273   default:
4274     return {SPF_UNKNOWN, SPNB_NA, false};
4275   }
4276 
4277   // If there is a common operand in the already matched min/max and the other
4278   // min/max operands match the compare operands (either directly or inverted),
4279   // then this is min/max of the same flavor.
4280 
4281   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4282   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4283   if (D == B) {
4284     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4285                                          match(A, m_Not(m_Specific(CmpRHS)))))
4286       return {L.Flavor, SPNB_NA, false};
4287   }
4288   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4289   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4290   if (C == B) {
4291     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4292                                          match(A, m_Not(m_Specific(CmpRHS)))))
4293       return {L.Flavor, SPNB_NA, false};
4294   }
4295   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4296   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4297   if (D == A) {
4298     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4299                                          match(B, m_Not(m_Specific(CmpRHS)))))
4300       return {L.Flavor, SPNB_NA, false};
4301   }
4302   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4303   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4304   if (C == A) {
4305     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4306                                          match(B, m_Not(m_Specific(CmpRHS)))))
4307       return {L.Flavor, SPNB_NA, false};
4308   }
4309 
4310   return {SPF_UNKNOWN, SPNB_NA, false};
4311 }
4312 
4313 /// Match non-obvious integer minimum and maximum sequences.
4314 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4315                                        Value *CmpLHS, Value *CmpRHS,
4316                                        Value *TrueVal, Value *FalseVal,
4317                                        Value *&LHS, Value *&RHS,
4318                                        unsigned Depth) {
4319   // Assume success. If there's no match, callers should not use these anyway.
4320   LHS = TrueVal;
4321   RHS = FalseVal;
4322 
4323   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4324   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4325     return SPR;
4326 
4327   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4328   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4329     return SPR;
4330 
4331   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4332     return {SPF_UNKNOWN, SPNB_NA, false};
4333 
4334   // Z = X -nsw Y
4335   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4336   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4337   if (match(TrueVal, m_Zero()) &&
4338       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4339     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4340 
4341   // Z = X -nsw Y
4342   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4343   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4344   if (match(FalseVal, m_Zero()) &&
4345       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4346     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4347 
4348   const APInt *C1;
4349   if (!match(CmpRHS, m_APInt(C1)))
4350     return {SPF_UNKNOWN, SPNB_NA, false};
4351 
4352   // An unsigned min/max can be written with a signed compare.
4353   const APInt *C2;
4354   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4355       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4356     // Is the sign bit set?
4357     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4358     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4359     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4360         C2->isMaxSignedValue())
4361       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4362 
4363     // Is the sign bit clear?
4364     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4365     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4366     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4367         C2->isMinSignedValue())
4368       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4369   }
4370 
4371   // Look through 'not' ops to find disguised signed min/max.
4372   // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4373   // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4374   if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4375       match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4376     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4377 
4378   // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4379   // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4380   if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4381       match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4382     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4383 
4384   return {SPF_UNKNOWN, SPNB_NA, false};
4385 }
4386 
4387 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4388                                               FastMathFlags FMF,
4389                                               Value *CmpLHS, Value *CmpRHS,
4390                                               Value *TrueVal, Value *FalseVal,
4391                                               Value *&LHS, Value *&RHS,
4392                                               unsigned Depth) {
4393   LHS = CmpLHS;
4394   RHS = CmpRHS;
4395 
4396   // Signed zero may return inconsistent results between implementations.
4397   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4398   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4399   // Therefore, we behave conservatively and only proceed if at least one of the
4400   // operands is known to not be zero or if we don't care about signed zero.
4401   switch (Pred) {
4402   default: break;
4403   // FIXME: Include OGT/OLT/UGT/ULT.
4404   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4405   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4406     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4407         !isKnownNonZero(CmpRHS))
4408       return {SPF_UNKNOWN, SPNB_NA, false};
4409   }
4410 
4411   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4412   bool Ordered = false;
4413 
4414   // When given one NaN and one non-NaN input:
4415   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4416   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4417   //     ordered comparison fails), which could be NaN or non-NaN.
4418   // so here we discover exactly what NaN behavior is required/accepted.
4419   if (CmpInst::isFPPredicate(Pred)) {
4420     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4421     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4422 
4423     if (LHSSafe && RHSSafe) {
4424       // Both operands are known non-NaN.
4425       NaNBehavior = SPNB_RETURNS_ANY;
4426     } else if (CmpInst::isOrdered(Pred)) {
4427       // An ordered comparison will return false when given a NaN, so it
4428       // returns the RHS.
4429       Ordered = true;
4430       if (LHSSafe)
4431         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4432         NaNBehavior = SPNB_RETURNS_NAN;
4433       else if (RHSSafe)
4434         NaNBehavior = SPNB_RETURNS_OTHER;
4435       else
4436         // Completely unsafe.
4437         return {SPF_UNKNOWN, SPNB_NA, false};
4438     } else {
4439       Ordered = false;
4440       // An unordered comparison will return true when given a NaN, so it
4441       // returns the LHS.
4442       if (LHSSafe)
4443         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4444         NaNBehavior = SPNB_RETURNS_OTHER;
4445       else if (RHSSafe)
4446         NaNBehavior = SPNB_RETURNS_NAN;
4447       else
4448         // Completely unsafe.
4449         return {SPF_UNKNOWN, SPNB_NA, false};
4450     }
4451   }
4452 
4453   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4454     std::swap(CmpLHS, CmpRHS);
4455     Pred = CmpInst::getSwappedPredicate(Pred);
4456     if (NaNBehavior == SPNB_RETURNS_NAN)
4457       NaNBehavior = SPNB_RETURNS_OTHER;
4458     else if (NaNBehavior == SPNB_RETURNS_OTHER)
4459       NaNBehavior = SPNB_RETURNS_NAN;
4460     Ordered = !Ordered;
4461   }
4462 
4463   // ([if]cmp X, Y) ? X : Y
4464   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4465     switch (Pred) {
4466     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4467     case ICmpInst::ICMP_UGT:
4468     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4469     case ICmpInst::ICMP_SGT:
4470     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4471     case ICmpInst::ICMP_ULT:
4472     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4473     case ICmpInst::ICMP_SLT:
4474     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4475     case FCmpInst::FCMP_UGT:
4476     case FCmpInst::FCMP_UGE:
4477     case FCmpInst::FCMP_OGT:
4478     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4479     case FCmpInst::FCMP_ULT:
4480     case FCmpInst::FCMP_ULE:
4481     case FCmpInst::FCMP_OLT:
4482     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4483     }
4484   }
4485 
4486   const APInt *C1;
4487   if (match(CmpRHS, m_APInt(C1))) {
4488     if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4489         (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4490 
4491       // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4492       // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4493       if (Pred == ICmpInst::ICMP_SGT &&
4494           (C1->isNullValue() || C1->isAllOnesValue())) {
4495         return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4496       }
4497 
4498       // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4499       // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4500       if (Pred == ICmpInst::ICMP_SLT &&
4501           (C1->isNullValue() || C1->isOneValue())) {
4502         return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4503       }
4504     }
4505   }
4506 
4507   if (CmpInst::isIntPredicate(Pred))
4508     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4509 
4510   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4511   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
4512   // semantics than minNum. Be conservative in such case.
4513   if (NaNBehavior != SPNB_RETURNS_ANY ||
4514       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4515        !isKnownNonZero(CmpRHS)))
4516     return {SPF_UNKNOWN, SPNB_NA, false};
4517 
4518   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4519 }
4520 
4521 /// Helps to match a select pattern in case of a type mismatch.
4522 ///
4523 /// The function processes the case when type of true and false values of a
4524 /// select instruction differs from type of the cmp instruction operands because
4525 /// of a cast instruction. The function checks if it is legal to move the cast
4526 /// operation after "select". If yes, it returns the new second value of
4527 /// "select" (with the assumption that cast is moved):
4528 /// 1. As operand of cast instruction when both values of "select" are same cast
4529 /// instructions.
4530 /// 2. As restored constant (by applying reverse cast operation) when the first
4531 /// value of the "select" is a cast operation and the second value is a
4532 /// constant.
4533 /// NOTE: We return only the new second value because the first value could be
4534 /// accessed as operand of cast instruction.
4535 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4536                               Instruction::CastOps *CastOp) {
4537   auto *Cast1 = dyn_cast<CastInst>(V1);
4538   if (!Cast1)
4539     return nullptr;
4540 
4541   *CastOp = Cast1->getOpcode();
4542   Type *SrcTy = Cast1->getSrcTy();
4543   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4544     // If V1 and V2 are both the same cast from the same type, look through V1.
4545     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4546       return Cast2->getOperand(0);
4547     return nullptr;
4548   }
4549 
4550   auto *C = dyn_cast<Constant>(V2);
4551   if (!C)
4552     return nullptr;
4553 
4554   Constant *CastedTo = nullptr;
4555   switch (*CastOp) {
4556   case Instruction::ZExt:
4557     if (CmpI->isUnsigned())
4558       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4559     break;
4560   case Instruction::SExt:
4561     if (CmpI->isSigned())
4562       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4563     break;
4564   case Instruction::Trunc:
4565     Constant *CmpConst;
4566     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4567         CmpConst->getType() == SrcTy) {
4568       // Here we have the following case:
4569       //
4570       //   %cond = cmp iN %x, CmpConst
4571       //   %tr = trunc iN %x to iK
4572       //   %narrowsel = select i1 %cond, iK %t, iK C
4573       //
4574       // We can always move trunc after select operation:
4575       //
4576       //   %cond = cmp iN %x, CmpConst
4577       //   %widesel = select i1 %cond, iN %x, iN CmpConst
4578       //   %tr = trunc iN %widesel to iK
4579       //
4580       // Note that C could be extended in any way because we don't care about
4581       // upper bits after truncation. It can't be abs pattern, because it would
4582       // look like:
4583       //
4584       //   select i1 %cond, x, -x.
4585       //
4586       // So only min/max pattern could be matched. Such match requires widened C
4587       // == CmpConst. That is why set widened C = CmpConst, condition trunc
4588       // CmpConst == C is checked below.
4589       CastedTo = CmpConst;
4590     } else {
4591       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4592     }
4593     break;
4594   case Instruction::FPTrunc:
4595     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4596     break;
4597   case Instruction::FPExt:
4598     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4599     break;
4600   case Instruction::FPToUI:
4601     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4602     break;
4603   case Instruction::FPToSI:
4604     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4605     break;
4606   case Instruction::UIToFP:
4607     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4608     break;
4609   case Instruction::SIToFP:
4610     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4611     break;
4612   default:
4613     break;
4614   }
4615 
4616   if (!CastedTo)
4617     return nullptr;
4618 
4619   // Make sure the cast doesn't lose any information.
4620   Constant *CastedBack =
4621       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4622   if (CastedBack != C)
4623     return nullptr;
4624 
4625   return CastedTo;
4626 }
4627 
4628 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4629                                              Instruction::CastOps *CastOp,
4630                                              unsigned Depth) {
4631   if (Depth >= MaxDepth)
4632     return {SPF_UNKNOWN, SPNB_NA, false};
4633 
4634   SelectInst *SI = dyn_cast<SelectInst>(V);
4635   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4636 
4637   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4638   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4639 
4640   CmpInst::Predicate Pred = CmpI->getPredicate();
4641   Value *CmpLHS = CmpI->getOperand(0);
4642   Value *CmpRHS = CmpI->getOperand(1);
4643   Value *TrueVal = SI->getTrueValue();
4644   Value *FalseVal = SI->getFalseValue();
4645   FastMathFlags FMF;
4646   if (isa<FPMathOperator>(CmpI))
4647     FMF = CmpI->getFastMathFlags();
4648 
4649   // Bail out early.
4650   if (CmpI->isEquality())
4651     return {SPF_UNKNOWN, SPNB_NA, false};
4652 
4653   // Deal with type mismatches.
4654   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4655     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
4656       // If this is a potential fmin/fmax with a cast to integer, then ignore
4657       // -0.0 because there is no corresponding integer value.
4658       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4659         FMF.setNoSignedZeros();
4660       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4661                                   cast<CastInst>(TrueVal)->getOperand(0), C,
4662                                   LHS, RHS, Depth);
4663     }
4664     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
4665       // If this is a potential fmin/fmax with a cast to integer, then ignore
4666       // -0.0 because there is no corresponding integer value.
4667       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4668         FMF.setNoSignedZeros();
4669       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4670                                   C, cast<CastInst>(FalseVal)->getOperand(0),
4671                                   LHS, RHS, Depth);
4672     }
4673   }
4674   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4675                               LHS, RHS, Depth);
4676 }
4677 
4678 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
4679   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
4680   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
4681   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
4682   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
4683   if (SPF == SPF_FMINNUM)
4684     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
4685   if (SPF == SPF_FMAXNUM)
4686     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
4687   llvm_unreachable("unhandled!");
4688 }
4689 
4690 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
4691   if (SPF == SPF_SMIN) return SPF_SMAX;
4692   if (SPF == SPF_UMIN) return SPF_UMAX;
4693   if (SPF == SPF_SMAX) return SPF_SMIN;
4694   if (SPF == SPF_UMAX) return SPF_UMIN;
4695   llvm_unreachable("unhandled!");
4696 }
4697 
4698 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
4699   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
4700 }
4701 
4702 /// Return true if "icmp Pred LHS RHS" is always true.
4703 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
4704                             const Value *RHS, const DataLayout &DL,
4705                             unsigned Depth) {
4706   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4707   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4708     return true;
4709 
4710   switch (Pred) {
4711   default:
4712     return false;
4713 
4714   case CmpInst::ICMP_SLE: {
4715     const APInt *C;
4716 
4717     // LHS s<= LHS +_{nsw} C   if C >= 0
4718     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4719       return !C->isNegative();
4720     return false;
4721   }
4722 
4723   case CmpInst::ICMP_ULE: {
4724     const APInt *C;
4725 
4726     // LHS u<= LHS +_{nuw} C   for any C
4727     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4728       return true;
4729 
4730     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4731     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4732                                        const Value *&X,
4733                                        const APInt *&CA, const APInt *&CB) {
4734       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4735           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4736         return true;
4737 
4738       // If X & C == 0 then (X | C) == X +_{nuw} C
4739       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4740           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4741         KnownBits Known(CA->getBitWidth());
4742         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
4743                          /*CxtI*/ nullptr, /*DT*/ nullptr);
4744         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
4745           return true;
4746       }
4747 
4748       return false;
4749     };
4750 
4751     const Value *X;
4752     const APInt *CLHS, *CRHS;
4753     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4754       return CLHS->ule(*CRHS);
4755 
4756     return false;
4757   }
4758   }
4759 }
4760 
4761 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4762 /// ALHS ARHS" is true.  Otherwise, return None.
4763 static Optional<bool>
4764 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4765                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
4766                       const DataLayout &DL, unsigned Depth) {
4767   switch (Pred) {
4768   default:
4769     return None;
4770 
4771   case CmpInst::ICMP_SLT:
4772   case CmpInst::ICMP_SLE:
4773     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
4774         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
4775       return true;
4776     return None;
4777 
4778   case CmpInst::ICMP_ULT:
4779   case CmpInst::ICMP_ULE:
4780     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
4781         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
4782       return true;
4783     return None;
4784   }
4785 }
4786 
4787 /// Return true if the operands of the two compares match.  IsSwappedOps is true
4788 /// when the operands match, but are swapped.
4789 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4790                           const Value *BLHS, const Value *BRHS,
4791                           bool &IsSwappedOps) {
4792 
4793   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4794   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4795   return IsMatchingOps || IsSwappedOps;
4796 }
4797 
4798 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4799 /// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4800 /// BRHS" is false.  Otherwise, return None if we can't infer anything.
4801 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4802                                                     const Value *ALHS,
4803                                                     const Value *ARHS,
4804                                                     CmpInst::Predicate BPred,
4805                                                     const Value *BLHS,
4806                                                     const Value *BRHS,
4807                                                     bool IsSwappedOps) {
4808   // Canonicalize the operands so they're matching.
4809   if (IsSwappedOps) {
4810     std::swap(BLHS, BRHS);
4811     BPred = ICmpInst::getSwappedPredicate(BPred);
4812   }
4813   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4814     return true;
4815   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4816     return false;
4817 
4818   return None;
4819 }
4820 
4821 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4822 /// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4823 /// C2" is false.  Otherwise, return None if we can't infer anything.
4824 static Optional<bool>
4825 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4826                                  const ConstantInt *C1,
4827                                  CmpInst::Predicate BPred,
4828                                  const Value *BLHS, const ConstantInt *C2) {
4829   assert(ALHS == BLHS && "LHS operands must match.");
4830   ConstantRange DomCR =
4831       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4832   ConstantRange CR =
4833       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4834   ConstantRange Intersection = DomCR.intersectWith(CR);
4835   ConstantRange Difference = DomCR.difference(CR);
4836   if (Intersection.isEmptySet())
4837     return false;
4838   if (Difference.isEmptySet())
4839     return true;
4840   return None;
4841 }
4842 
4843 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
4844 /// false.  Otherwise, return None if we can't infer anything.
4845 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
4846                                          const ICmpInst *RHS,
4847                                          const DataLayout &DL, bool LHSIsTrue,
4848                                          unsigned Depth) {
4849   Value *ALHS = LHS->getOperand(0);
4850   Value *ARHS = LHS->getOperand(1);
4851   // The rest of the logic assumes the LHS condition is true.  If that's not the
4852   // case, invert the predicate to make it so.
4853   ICmpInst::Predicate APred =
4854       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
4855 
4856   Value *BLHS = RHS->getOperand(0);
4857   Value *BRHS = RHS->getOperand(1);
4858   ICmpInst::Predicate BPred = RHS->getPredicate();
4859 
4860   // Can we infer anything when the two compares have matching operands?
4861   bool IsSwappedOps;
4862   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4863     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4864             APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4865       return Implication;
4866     // No amount of additional analysis will infer the second condition, so
4867     // early exit.
4868     return None;
4869   }
4870 
4871   // Can we infer anything when the LHS operands match and the RHS operands are
4872   // constants (not necessarily matching)?
4873   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4874     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4875             APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4876             cast<ConstantInt>(BRHS)))
4877       return Implication;
4878     // No amount of additional analysis will infer the second condition, so
4879     // early exit.
4880     return None;
4881   }
4882 
4883   if (APred == BPred)
4884     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
4885   return None;
4886 }
4887 
4888 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
4889 /// false.  Otherwise, return None if we can't infer anything.  We expect the
4890 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
4891 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
4892                                          const ICmpInst *RHS,
4893                                          const DataLayout &DL, bool LHSIsTrue,
4894                                          unsigned Depth) {
4895   // The LHS must be an 'or' or an 'and' instruction.
4896   assert((LHS->getOpcode() == Instruction::And ||
4897           LHS->getOpcode() == Instruction::Or) &&
4898          "Expected LHS to be 'and' or 'or'.");
4899 
4900   assert(Depth <= MaxDepth && "Hit recursion limit");
4901 
4902   // If the result of an 'or' is false, then we know both legs of the 'or' are
4903   // false.  Similarly, if the result of an 'and' is true, then we know both
4904   // legs of the 'and' are true.
4905   Value *ALHS, *ARHS;
4906   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
4907       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
4908     // FIXME: Make this non-recursion.
4909     if (Optional<bool> Implication =
4910             isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
4911       return Implication;
4912     if (Optional<bool> Implication =
4913             isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
4914       return Implication;
4915     return None;
4916   }
4917   return None;
4918 }
4919 
4920 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4921                                         const DataLayout &DL, bool LHSIsTrue,
4922                                         unsigned Depth) {
4923   // Bail out when we hit the limit.
4924   if (Depth == MaxDepth)
4925     return None;
4926 
4927   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
4928   // example.
4929   if (LHS->getType() != RHS->getType())
4930     return None;
4931 
4932   Type *OpTy = LHS->getType();
4933   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
4934 
4935   // LHS ==> RHS by definition
4936   if (LHS == RHS)
4937     return LHSIsTrue;
4938 
4939   // FIXME: Extending the code below to handle vectors.
4940   if (OpTy->isVectorTy())
4941     return None;
4942 
4943   assert(OpTy->isIntegerTy(1) && "implied by above");
4944 
4945   // Both LHS and RHS are icmps.
4946   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
4947   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
4948   if (LHSCmp && RHSCmp)
4949     return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
4950 
4951   // The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to be
4952   // an icmp. FIXME: Add support for and/or on the RHS.
4953   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
4954   if (LHSBO && RHSCmp) {
4955     if ((LHSBO->getOpcode() == Instruction::And ||
4956          LHSBO->getOpcode() == Instruction::Or))
4957       return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
4958   }
4959   return None;
4960 }
4961