1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/Loads.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/Analysis/TargetLibraryInfo.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/DiagnosticInfo.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GetElementPtrTypeIterator.h"
47 #include "llvm/IR/GlobalAlias.h"
48 #include "llvm/IR/GlobalValue.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/LLVMContext.h"
56 #include "llvm/IR/Metadata.h"
57 #include "llvm/IR/Module.h"
58 #include "llvm/IR/Operator.h"
59 #include "llvm/IR/PatternMatch.h"
60 #include "llvm/IR/Type.h"
61 #include "llvm/IR/User.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/Compiler.h"
66 #include "llvm/Support/ErrorHandling.h"
67 #include "llvm/Support/KnownBits.h"
68 #include "llvm/Support/MathExtras.h"
69 #include <algorithm>
70 #include <array>
71 #include <cassert>
72 #include <cstdint>
73 #include <iterator>
74 #include <utility>
75 
76 using namespace llvm;
77 using namespace llvm::PatternMatch;
78 
79 const unsigned MaxDepth = 6;
80 
81 // Controls the number of uses of the value searched for possible
82 // dominating comparisons.
83 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
84                                               cl::Hidden, cl::init(20));
85 
86 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
87 /// returns the element type's bitwidth.
88 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
89   if (unsigned BitWidth = Ty->getScalarSizeInBits())
90     return BitWidth;
91 
92   return DL.getPointerTypeSizeInBits(Ty);
93 }
94 
95 namespace {
96 
97 // Simplifying using an assume can only be done in a particular control-flow
98 // context (the context instruction provides that context). If an assume and
99 // the context instruction are not in the same block then the DT helps in
100 // figuring out if we can use it.
101 struct Query {
102   const DataLayout &DL;
103   AssumptionCache *AC;
104   const Instruction *CxtI;
105   const DominatorTree *DT;
106 
107   // Unlike the other analyses, this may be a nullptr because not all clients
108   // provide it currently.
109   OptimizationRemarkEmitter *ORE;
110 
111   /// Set of assumptions that should be excluded from further queries.
112   /// This is because of the potential for mutual recursion to cause
113   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
114   /// classic case of this is assume(x = y), which will attempt to determine
115   /// bits in x from bits in y, which will attempt to determine bits in y from
116   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
117   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
118   /// (all of which can call computeKnownBits), and so on.
119   std::array<const Value *, MaxDepth> Excluded;
120 
121   unsigned NumExcluded = 0;
122 
123   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
124         const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
125       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}
126 
127   Query(const Query &Q, const Value *NewExcl)
128       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
129         NumExcluded(Q.NumExcluded) {
130     Excluded = Q.Excluded;
131     Excluded[NumExcluded++] = NewExcl;
132     assert(NumExcluded <= Excluded.size());
133   }
134 
135   bool isExcluded(const Value *Value) const {
136     if (NumExcluded == 0)
137       return false;
138     auto End = Excluded.begin() + NumExcluded;
139     return std::find(Excluded.begin(), End, Value) != End;
140   }
141 };
142 
143 } // end anonymous namespace
144 
145 // Given the provided Value and, potentially, a context instruction, return
146 // the preferred context instruction (if any).
147 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
148   // If we've been provided with a context instruction, then use that (provided
149   // it has been inserted).
150   if (CxtI && CxtI->getParent())
151     return CxtI;
152 
153   // If the value is really an already-inserted instruction, then use that.
154   CxtI = dyn_cast<Instruction>(V);
155   if (CxtI && CxtI->getParent())
156     return CxtI;
157 
158   return nullptr;
159 }
160 
161 static void computeKnownBits(const Value *V, KnownBits &Known,
162                              unsigned Depth, const Query &Q);
163 
164 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
165                             const DataLayout &DL, unsigned Depth,
166                             AssumptionCache *AC, const Instruction *CxtI,
167                             const DominatorTree *DT,
168                             OptimizationRemarkEmitter *ORE) {
169   ::computeKnownBits(V, Known, Depth,
170                      Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
171 }
172 
173 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
174                                   const Query &Q);
175 
176 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
177                                  unsigned Depth, AssumptionCache *AC,
178                                  const Instruction *CxtI,
179                                  const DominatorTree *DT,
180                                  OptimizationRemarkEmitter *ORE) {
181   return ::computeKnownBits(V, Depth,
182                             Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
183 }
184 
185 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
186                                const DataLayout &DL,
187                                AssumptionCache *AC, const Instruction *CxtI,
188                                const DominatorTree *DT) {
189   assert(LHS->getType() == RHS->getType() &&
190          "LHS and RHS should have the same type");
191   assert(LHS->getType()->isIntOrIntVectorTy() &&
192          "LHS and RHS should be integers");
193   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
194   KnownBits LHSKnown(IT->getBitWidth());
195   KnownBits RHSKnown(IT->getBitWidth());
196   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
197   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
198   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
199 }
200 
201 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
202   for (const User *U : CxtI->users()) {
203     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
204       if (IC->isEquality())
205         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
206           if (C->isNullValue())
207             continue;
208     return false;
209   }
210   return true;
211 }
212 
213 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
214                                    const Query &Q);
215 
216 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
217                                   bool OrZero,
218                                   unsigned Depth, AssumptionCache *AC,
219                                   const Instruction *CxtI,
220                                   const DominatorTree *DT) {
221   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
222                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
223 }
224 
225 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
226 
227 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
228                           AssumptionCache *AC, const Instruction *CxtI,
229                           const DominatorTree *DT) {
230   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
231 }
232 
233 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
234                               unsigned Depth,
235                               AssumptionCache *AC, const Instruction *CxtI,
236                               const DominatorTree *DT) {
237   KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
238   return Known.isNonNegative();
239 }
240 
241 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
242                            AssumptionCache *AC, const Instruction *CxtI,
243                            const DominatorTree *DT) {
244   if (auto *CI = dyn_cast<ConstantInt>(V))
245     return CI->getValue().isStrictlyPositive();
246 
247   // TODO: We'd doing two recursive queries here.  We should factor this such
248   // that only a single query is needed.
249   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
250     isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
251 }
252 
253 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
254                            AssumptionCache *AC, const Instruction *CxtI,
255                            const DominatorTree *DT) {
256   KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
257   return Known.isNegative();
258 }
259 
260 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
261 
262 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
263                            const DataLayout &DL,
264                            AssumptionCache *AC, const Instruction *CxtI,
265                            const DominatorTree *DT) {
266   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
267                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
268                                          DT));
269 }
270 
271 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
272                               const Query &Q);
273 
274 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
275                              const DataLayout &DL,
276                              unsigned Depth, AssumptionCache *AC,
277                              const Instruction *CxtI, const DominatorTree *DT) {
278   return ::MaskedValueIsZero(V, Mask, Depth,
279                              Query(DL, AC, safeCxtI(V, CxtI), DT));
280 }
281 
282 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
283                                    const Query &Q);
284 
285 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
286                                   unsigned Depth, AssumptionCache *AC,
287                                   const Instruction *CxtI,
288                                   const DominatorTree *DT) {
289   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
290 }
291 
292 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
293                                    bool NSW,
294                                    KnownBits &KnownOut, KnownBits &Known2,
295                                    unsigned Depth, const Query &Q) {
296   unsigned BitWidth = KnownOut.getBitWidth();
297 
298   // If an initial sequence of bits in the result is not needed, the
299   // corresponding bits in the operands are not needed.
300   KnownBits LHSKnown(BitWidth);
301   computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
302   computeKnownBits(Op1, Known2, Depth + 1, Q);
303 
304   KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
305 }
306 
307 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
308                                 KnownBits &Known, KnownBits &Known2,
309                                 unsigned Depth, const Query &Q) {
310   unsigned BitWidth = Known.getBitWidth();
311   computeKnownBits(Op1, Known, Depth + 1, Q);
312   computeKnownBits(Op0, Known2, Depth + 1, Q);
313 
314   bool isKnownNegative = false;
315   bool isKnownNonNegative = false;
316   // If the multiplication is known not to overflow, compute the sign bit.
317   if (NSW) {
318     if (Op0 == Op1) {
319       // The product of a number with itself is non-negative.
320       isKnownNonNegative = true;
321     } else {
322       bool isKnownNonNegativeOp1 = Known.isNonNegative();
323       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
324       bool isKnownNegativeOp1 = Known.isNegative();
325       bool isKnownNegativeOp0 = Known2.isNegative();
326       // The product of two numbers with the same sign is non-negative.
327       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
328         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
329       // The product of a negative number and a non-negative number is either
330       // negative or zero.
331       if (!isKnownNonNegative)
332         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
333                            isKnownNonZero(Op0, Depth, Q)) ||
334                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
335                            isKnownNonZero(Op1, Depth, Q));
336     }
337   }
338 
339   assert(!Known.hasConflict() && !Known2.hasConflict());
340   // Compute a conservative estimate for high known-0 bits.
341   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
342                              Known2.countMinLeadingZeros(),
343                              BitWidth) - BitWidth;
344   LeadZ = std::min(LeadZ, BitWidth);
345 
346   // The result of the bottom bits of an integer multiply can be
347   // inferred by looking at the bottom bits of both operands and
348   // multiplying them together.
349   // We can infer at least the minimum number of known trailing bits
350   // of both operands. Depending on number of trailing zeros, we can
351   // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
352   // a and b are divisible by m and n respectively.
353   // We then calculate how many of those bits are inferrable and set
354   // the output. For example, the i8 mul:
355   //  a = XXXX1100 (12)
356   //  b = XXXX1110 (14)
357   // We know the bottom 3 bits are zero since the first can be divided by
358   // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
359   // Applying the multiplication to the trimmed arguments gets:
360   //    XX11 (3)
361   //    X111 (7)
362   // -------
363   //    XX11
364   //   XX11
365   //  XX11
366   // XX11
367   // -------
368   // XXXXX01
369   // Which allows us to infer the 2 LSBs. Since we're multiplying the result
370   // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
371   // The proof for this can be described as:
372   // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
373   //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
374   //                    umin(countTrailingZeros(C2), C6) +
375   //                    umin(C5 - umin(countTrailingZeros(C1), C5),
376   //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
377   // %aa = shl i8 %a, C5
378   // %bb = shl i8 %b, C6
379   // %aaa = or i8 %aa, C1
380   // %bbb = or i8 %bb, C2
381   // %mul = mul i8 %aaa, %bbb
382   // %mask = and i8 %mul, C7
383   //   =>
384   // %mask = i8 ((C1*C2)&C7)
385   // Where C5, C6 describe the known bits of %a, %b
386   // C1, C2 describe the known bottom bits of %a, %b.
387   // C7 describes the mask of the known bits of the result.
388   APInt Bottom0 = Known.One;
389   APInt Bottom1 = Known2.One;
390 
391   // How many times we'd be able to divide each argument by 2 (shr by 1).
392   // This gives us the number of trailing zeros on the multiplication result.
393   unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
394   unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
395   unsigned TrailZero0 = Known.countMinTrailingZeros();
396   unsigned TrailZero1 = Known2.countMinTrailingZeros();
397   unsigned TrailZ = TrailZero0 + TrailZero1;
398 
399   // Figure out the fewest known-bits operand.
400   unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
401                                       TrailBitsKnown1 - TrailZero1);
402   unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
403 
404   APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
405                       Bottom1.getLoBits(TrailBitsKnown1);
406 
407   Known.resetAll();
408   Known.Zero.setHighBits(LeadZ);
409   Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
410   Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
411 
412   // Only make use of no-wrap flags if we failed to compute the sign bit
413   // directly.  This matters if the multiplication always overflows, in
414   // which case we prefer to follow the result of the direct computation,
415   // though as the program is invoking undefined behaviour we can choose
416   // whatever we like here.
417   if (isKnownNonNegative && !Known.isNegative())
418     Known.makeNonNegative();
419   else if (isKnownNegative && !Known.isNonNegative())
420     Known.makeNegative();
421 }
422 
423 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
424                                              KnownBits &Known) {
425   unsigned BitWidth = Known.getBitWidth();
426   unsigned NumRanges = Ranges.getNumOperands() / 2;
427   assert(NumRanges >= 1);
428 
429   Known.Zero.setAllBits();
430   Known.One.setAllBits();
431 
432   for (unsigned i = 0; i < NumRanges; ++i) {
433     ConstantInt *Lower =
434         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
435     ConstantInt *Upper =
436         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
437     ConstantRange Range(Lower->getValue(), Upper->getValue());
438 
439     // The first CommonPrefixBits of all values in Range are equal.
440     unsigned CommonPrefixBits =
441         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
442 
443     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
444     Known.One &= Range.getUnsignedMax() & Mask;
445     Known.Zero &= ~Range.getUnsignedMax() & Mask;
446   }
447 }
448 
449 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
450   SmallVector<const Value *, 16> WorkSet(1, I);
451   SmallPtrSet<const Value *, 32> Visited;
452   SmallPtrSet<const Value *, 16> EphValues;
453 
454   // The instruction defining an assumption's condition itself is always
455   // considered ephemeral to that assumption (even if it has other
456   // non-ephemeral users). See r246696's test case for an example.
457   if (is_contained(I->operands(), E))
458     return true;
459 
460   while (!WorkSet.empty()) {
461     const Value *V = WorkSet.pop_back_val();
462     if (!Visited.insert(V).second)
463       continue;
464 
465     // If all uses of this value are ephemeral, then so is this value.
466     if (llvm::all_of(V->users(), [&](const User *U) {
467                                    return EphValues.count(U);
468                                  })) {
469       if (V == E)
470         return true;
471 
472       if (V == I || isSafeToSpeculativelyExecute(V)) {
473        EphValues.insert(V);
474        if (const User *U = dyn_cast<User>(V))
475          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
476               J != JE; ++J)
477            WorkSet.push_back(*J);
478       }
479     }
480   }
481 
482   return false;
483 }
484 
485 // Is this an intrinsic that cannot be speculated but also cannot trap?
486 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
487   if (const CallInst *CI = dyn_cast<CallInst>(I))
488     if (Function *F = CI->getCalledFunction())
489       switch (F->getIntrinsicID()) {
490       default: break;
491       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
492       case Intrinsic::assume:
493       case Intrinsic::sideeffect:
494       case Intrinsic::dbg_declare:
495       case Intrinsic::dbg_value:
496       case Intrinsic::invariant_start:
497       case Intrinsic::invariant_end:
498       case Intrinsic::lifetime_start:
499       case Intrinsic::lifetime_end:
500       case Intrinsic::objectsize:
501       case Intrinsic::ptr_annotation:
502       case Intrinsic::var_annotation:
503         return true;
504       }
505 
506   return false;
507 }
508 
509 bool llvm::isValidAssumeForContext(const Instruction *Inv,
510                                    const Instruction *CxtI,
511                                    const DominatorTree *DT) {
512   // There are two restrictions on the use of an assume:
513   //  1. The assume must dominate the context (or the control flow must
514   //     reach the assume whenever it reaches the context).
515   //  2. The context must not be in the assume's set of ephemeral values
516   //     (otherwise we will use the assume to prove that the condition
517   //     feeding the assume is trivially true, thus causing the removal of
518   //     the assume).
519 
520   if (DT) {
521     if (DT->dominates(Inv, CxtI))
522       return true;
523   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
524     // We don't have a DT, but this trivially dominates.
525     return true;
526   }
527 
528   // With or without a DT, the only remaining case we will check is if the
529   // instructions are in the same BB.  Give up if that is not the case.
530   if (Inv->getParent() != CxtI->getParent())
531     return false;
532 
533   // If we have a dom tree, then we now know that the assume doens't dominate
534   // the other instruction.  If we don't have a dom tree then we can check if
535   // the assume is first in the BB.
536   if (!DT) {
537     // Search forward from the assume until we reach the context (or the end
538     // of the block); the common case is that the assume will come first.
539     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
540          IE = Inv->getParent()->end(); I != IE; ++I)
541       if (&*I == CxtI)
542         return true;
543   }
544 
545   // The context comes first, but they're both in the same block. Make sure
546   // there is nothing in between that might interrupt the control flow.
547   for (BasicBlock::const_iterator I =
548          std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
549        I != IE; ++I)
550     if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
551       return false;
552 
553   return !isEphemeralValueOf(Inv, CxtI);
554 }
555 
556 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
557                                        unsigned Depth, const Query &Q) {
558   // Use of assumptions is context-sensitive. If we don't have a context, we
559   // cannot use them!
560   if (!Q.AC || !Q.CxtI)
561     return;
562 
563   unsigned BitWidth = Known.getBitWidth();
564 
565   // Note that the patterns below need to be kept in sync with the code
566   // in AssumptionCache::updateAffectedValues.
567 
568   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
569     if (!AssumeVH)
570       continue;
571     CallInst *I = cast<CallInst>(AssumeVH);
572     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
573            "Got assumption for the wrong function!");
574     if (Q.isExcluded(I))
575       continue;
576 
577     // Warning: This loop can end up being somewhat performance sensetive.
578     // We're running this loop for once for each value queried resulting in a
579     // runtime of ~O(#assumes * #values).
580 
581     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
582            "must be an assume intrinsic");
583 
584     Value *Arg = I->getArgOperand(0);
585 
586     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
587       assert(BitWidth == 1 && "assume operand is not i1?");
588       Known.setAllOnes();
589       return;
590     }
591     if (match(Arg, m_Not(m_Specific(V))) &&
592         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
593       assert(BitWidth == 1 && "assume operand is not i1?");
594       Known.setAllZero();
595       return;
596     }
597 
598     // The remaining tests are all recursive, so bail out if we hit the limit.
599     if (Depth == MaxDepth)
600       continue;
601 
602     Value *A, *B;
603     auto m_V = m_CombineOr(m_Specific(V),
604                            m_CombineOr(m_PtrToInt(m_Specific(V)),
605                            m_BitCast(m_Specific(V))));
606 
607     CmpInst::Predicate Pred;
608     uint64_t C;
609     // assume(v = a)
610     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
611         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
612       KnownBits RHSKnown(BitWidth);
613       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
614       Known.Zero |= RHSKnown.Zero;
615       Known.One  |= RHSKnown.One;
616     // assume(v & b = a)
617     } else if (match(Arg,
618                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
619                Pred == ICmpInst::ICMP_EQ &&
620                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
621       KnownBits RHSKnown(BitWidth);
622       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
623       KnownBits MaskKnown(BitWidth);
624       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
625 
626       // For those bits in the mask that are known to be one, we can propagate
627       // known bits from the RHS to V.
628       Known.Zero |= RHSKnown.Zero & MaskKnown.One;
629       Known.One  |= RHSKnown.One  & MaskKnown.One;
630     // assume(~(v & b) = a)
631     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
632                                    m_Value(A))) &&
633                Pred == ICmpInst::ICMP_EQ &&
634                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
635       KnownBits RHSKnown(BitWidth);
636       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
637       KnownBits MaskKnown(BitWidth);
638       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
639 
640       // For those bits in the mask that are known to be one, we can propagate
641       // inverted known bits from the RHS to V.
642       Known.Zero |= RHSKnown.One  & MaskKnown.One;
643       Known.One  |= RHSKnown.Zero & MaskKnown.One;
644     // assume(v | b = a)
645     } else if (match(Arg,
646                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
647                Pred == ICmpInst::ICMP_EQ &&
648                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
649       KnownBits RHSKnown(BitWidth);
650       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
651       KnownBits BKnown(BitWidth);
652       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
653 
654       // For those bits in B that are known to be zero, we can propagate known
655       // bits from the RHS to V.
656       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
657       Known.One  |= RHSKnown.One  & BKnown.Zero;
658     // assume(~(v | b) = a)
659     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
660                                    m_Value(A))) &&
661                Pred == ICmpInst::ICMP_EQ &&
662                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
663       KnownBits RHSKnown(BitWidth);
664       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
665       KnownBits BKnown(BitWidth);
666       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
667 
668       // For those bits in B that are known to be zero, we can propagate
669       // inverted known bits from the RHS to V.
670       Known.Zero |= RHSKnown.One  & BKnown.Zero;
671       Known.One  |= RHSKnown.Zero & BKnown.Zero;
672     // assume(v ^ b = a)
673     } else if (match(Arg,
674                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
675                Pred == ICmpInst::ICMP_EQ &&
676                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
677       KnownBits RHSKnown(BitWidth);
678       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
679       KnownBits BKnown(BitWidth);
680       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
681 
682       // For those bits in B that are known to be zero, we can propagate known
683       // bits from the RHS to V. For those bits in B that are known to be one,
684       // we can propagate inverted known bits from the RHS to V.
685       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
686       Known.One  |= RHSKnown.One  & BKnown.Zero;
687       Known.Zero |= RHSKnown.One  & BKnown.One;
688       Known.One  |= RHSKnown.Zero & BKnown.One;
689     // assume(~(v ^ b) = a)
690     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
691                                    m_Value(A))) &&
692                Pred == ICmpInst::ICMP_EQ &&
693                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
694       KnownBits RHSKnown(BitWidth);
695       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
696       KnownBits BKnown(BitWidth);
697       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
698 
699       // For those bits in B that are known to be zero, we can propagate
700       // inverted known bits from the RHS to V. For those bits in B that are
701       // known to be one, we can propagate known bits from the RHS to V.
702       Known.Zero |= RHSKnown.One  & BKnown.Zero;
703       Known.One  |= RHSKnown.Zero & BKnown.Zero;
704       Known.Zero |= RHSKnown.Zero & BKnown.One;
705       Known.One  |= RHSKnown.One  & BKnown.One;
706     // assume(v << c = a)
707     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
708                                    m_Value(A))) &&
709                Pred == ICmpInst::ICMP_EQ &&
710                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
711                C < BitWidth) {
712       KnownBits RHSKnown(BitWidth);
713       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
714       // For those bits in RHS that are known, we can propagate them to known
715       // bits in V shifted to the right by C.
716       RHSKnown.Zero.lshrInPlace(C);
717       Known.Zero |= RHSKnown.Zero;
718       RHSKnown.One.lshrInPlace(C);
719       Known.One  |= RHSKnown.One;
720     // assume(~(v << c) = a)
721     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
722                                    m_Value(A))) &&
723                Pred == ICmpInst::ICMP_EQ &&
724                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
725                C < BitWidth) {
726       KnownBits RHSKnown(BitWidth);
727       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
728       // For those bits in RHS that are known, we can propagate them inverted
729       // to known bits in V shifted to the right by C.
730       RHSKnown.One.lshrInPlace(C);
731       Known.Zero |= RHSKnown.One;
732       RHSKnown.Zero.lshrInPlace(C);
733       Known.One  |= RHSKnown.Zero;
734     // assume(v >> c = a)
735     } else if (match(Arg,
736                      m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
737                               m_Value(A))) &&
738                Pred == ICmpInst::ICMP_EQ &&
739                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
740                C < BitWidth) {
741       KnownBits RHSKnown(BitWidth);
742       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
743       // For those bits in RHS that are known, we can propagate them to known
744       // bits in V shifted to the right by C.
745       Known.Zero |= RHSKnown.Zero << C;
746       Known.One  |= RHSKnown.One  << C;
747     // assume(~(v >> c) = a)
748     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
749                                    m_Value(A))) &&
750                Pred == ICmpInst::ICMP_EQ &&
751                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
752                C < BitWidth) {
753       KnownBits RHSKnown(BitWidth);
754       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
755       // For those bits in RHS that are known, we can propagate them inverted
756       // to known bits in V shifted to the right by C.
757       Known.Zero |= RHSKnown.One  << C;
758       Known.One  |= RHSKnown.Zero << C;
759     // assume(v >=_s c) where c is non-negative
760     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
761                Pred == ICmpInst::ICMP_SGE &&
762                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
763       KnownBits RHSKnown(BitWidth);
764       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
765 
766       if (RHSKnown.isNonNegative()) {
767         // We know that the sign bit is zero.
768         Known.makeNonNegative();
769       }
770     // assume(v >_s c) where c is at least -1.
771     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
772                Pred == ICmpInst::ICMP_SGT &&
773                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
774       KnownBits RHSKnown(BitWidth);
775       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
776 
777       if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
778         // We know that the sign bit is zero.
779         Known.makeNonNegative();
780       }
781     // assume(v <=_s c) where c is negative
782     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
783                Pred == ICmpInst::ICMP_SLE &&
784                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
785       KnownBits RHSKnown(BitWidth);
786       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
787 
788       if (RHSKnown.isNegative()) {
789         // We know that the sign bit is one.
790         Known.makeNegative();
791       }
792     // assume(v <_s c) where c is non-positive
793     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
794                Pred == ICmpInst::ICMP_SLT &&
795                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
796       KnownBits RHSKnown(BitWidth);
797       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
798 
799       if (RHSKnown.isZero() || RHSKnown.isNegative()) {
800         // We know that the sign bit is one.
801         Known.makeNegative();
802       }
803     // assume(v <=_u c)
804     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
805                Pred == ICmpInst::ICMP_ULE &&
806                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
807       KnownBits RHSKnown(BitWidth);
808       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
809 
810       // Whatever high bits in c are zero are known to be zero.
811       Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
812       // assume(v <_u c)
813     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
814                Pred == ICmpInst::ICMP_ULT &&
815                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
816       KnownBits RHSKnown(BitWidth);
817       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
818 
819       // Whatever high bits in c are zero are known to be zero (if c is a power
820       // of 2, then one more).
821       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
822         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
823       else
824         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
825     }
826   }
827 
828   // If assumptions conflict with each other or previous known bits, then we
829   // have a logical fallacy. It's possible that the assumption is not reachable,
830   // so this isn't a real bug. On the other hand, the program may have undefined
831   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
832   // clear out the known bits, try to warn the user, and hope for the best.
833   if (Known.Zero.intersects(Known.One)) {
834     Known.resetAll();
835 
836     if (Q.ORE)
837       Q.ORE->emit([&]() {
838         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
839         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
840                                           CxtI)
841                << "Detected conflicting code assumptions. Program may "
842                   "have undefined behavior, or compiler may have "
843                   "internal error.";
844       });
845   }
846 }
847 
848 /// Compute known bits from a shift operator, including those with a
849 /// non-constant shift amount. Known is the output of this function. Known2 is a
850 /// pre-allocated temporary with the same bit width as Known. KZF and KOF are
851 /// operator-specific functors that, given the known-zero or known-one bits
852 /// respectively, and a shift amount, compute the implied known-zero or
853 /// known-one bits of the shift operator's result respectively for that shift
854 /// amount. The results from calling KZF and KOF are conservatively combined for
855 /// all permitted shift amounts.
856 static void computeKnownBitsFromShiftOperator(
857     const Operator *I, KnownBits &Known, KnownBits &Known2,
858     unsigned Depth, const Query &Q,
859     function_ref<APInt(const APInt &, unsigned)> KZF,
860     function_ref<APInt(const APInt &, unsigned)> KOF) {
861   unsigned BitWidth = Known.getBitWidth();
862 
863   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
864     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
865 
866     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
867     Known.Zero = KZF(Known.Zero, ShiftAmt);
868     Known.One  = KOF(Known.One, ShiftAmt);
869     // If the known bits conflict, this must be an overflowing left shift, so
870     // the shift result is poison. We can return anything we want. Choose 0 for
871     // the best folding opportunity.
872     if (Known.hasConflict())
873       Known.setAllZero();
874 
875     return;
876   }
877 
878   computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
879 
880   // If the shift amount could be greater than or equal to the bit-width of the
881   // LHS, the value could be poison, but bail out because the check below is
882   // expensive. TODO: Should we just carry on?
883   if ((~Known.Zero).uge(BitWidth)) {
884     Known.resetAll();
885     return;
886   }
887 
888   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
889   // BitWidth > 64 and any upper bits are known, we'll end up returning the
890   // limit value (which implies all bits are known).
891   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
892   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
893 
894   // It would be more-clearly correct to use the two temporaries for this
895   // calculation. Reusing the APInts here to prevent unnecessary allocations.
896   Known.resetAll();
897 
898   // If we know the shifter operand is nonzero, we can sometimes infer more
899   // known bits. However this is expensive to compute, so be lazy about it and
900   // only compute it when absolutely necessary.
901   Optional<bool> ShifterOperandIsNonZero;
902 
903   // Early exit if we can't constrain any well-defined shift amount.
904   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
905       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
906     ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
907     if (!*ShifterOperandIsNonZero)
908       return;
909   }
910 
911   computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
912 
913   Known.Zero.setAllBits();
914   Known.One.setAllBits();
915   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
916     // Combine the shifted known input bits only for those shift amounts
917     // compatible with its known constraints.
918     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
919       continue;
920     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
921       continue;
922     // If we know the shifter is nonzero, we may be able to infer more known
923     // bits. This check is sunk down as far as possible to avoid the expensive
924     // call to isKnownNonZero if the cheaper checks above fail.
925     if (ShiftAmt == 0) {
926       if (!ShifterOperandIsNonZero.hasValue())
927         ShifterOperandIsNonZero =
928             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
929       if (*ShifterOperandIsNonZero)
930         continue;
931     }
932 
933     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
934     Known.One  &= KOF(Known2.One, ShiftAmt);
935   }
936 
937   // If the known bits conflict, the result is poison. Return a 0 and hope the
938   // caller can further optimize that.
939   if (Known.hasConflict())
940     Known.setAllZero();
941 }
942 
943 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
944                                          unsigned Depth, const Query &Q) {
945   unsigned BitWidth = Known.getBitWidth();
946 
947   KnownBits Known2(Known);
948   switch (I->getOpcode()) {
949   default: break;
950   case Instruction::Load:
951     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
952       computeKnownBitsFromRangeMetadata(*MD, Known);
953     break;
954   case Instruction::And: {
955     // If either the LHS or the RHS are Zero, the result is zero.
956     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
957     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
958 
959     // Output known-1 bits are only known if set in both the LHS & RHS.
960     Known.One &= Known2.One;
961     // Output known-0 are known to be clear if zero in either the LHS | RHS.
962     Known.Zero |= Known2.Zero;
963 
964     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
965     // here we handle the more general case of adding any odd number by
966     // matching the form add(x, add(x, y)) where y is odd.
967     // TODO: This could be generalized to clearing any bit set in y where the
968     // following bit is known to be unset in y.
969     Value *Y = nullptr;
970     if (!Known.Zero[0] && !Known.One[0] &&
971         (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
972                                        m_Value(Y))) ||
973          match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
974                                        m_Value(Y))))) {
975       Known2.resetAll();
976       computeKnownBits(Y, Known2, Depth + 1, Q);
977       if (Known2.countMinTrailingOnes() > 0)
978         Known.Zero.setBit(0);
979     }
980     break;
981   }
982   case Instruction::Or:
983     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
984     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
985 
986     // Output known-0 bits are only known if clear in both the LHS & RHS.
987     Known.Zero &= Known2.Zero;
988     // Output known-1 are known to be set if set in either the LHS | RHS.
989     Known.One |= Known2.One;
990     break;
991   case Instruction::Xor: {
992     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
993     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
994 
995     // Output known-0 bits are known if clear or set in both the LHS & RHS.
996     APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
997     // Output known-1 are known to be set if set in only one of the LHS, RHS.
998     Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
999     Known.Zero = std::move(KnownZeroOut);
1000     break;
1001   }
1002   case Instruction::Mul: {
1003     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1004     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
1005                         Known2, Depth, Q);
1006     break;
1007   }
1008   case Instruction::UDiv: {
1009     // For the purposes of computing leading zeros we can conservatively
1010     // treat a udiv as a logical right shift by the power of 2 known to
1011     // be less than the denominator.
1012     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1013     unsigned LeadZ = Known2.countMinLeadingZeros();
1014 
1015     Known2.resetAll();
1016     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1017     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1018     if (RHSMaxLeadingZeros != BitWidth)
1019       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1020 
1021     Known.Zero.setHighBits(LeadZ);
1022     break;
1023   }
1024   case Instruction::Select: {
1025     const Value *LHS, *RHS;
1026     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1027     if (SelectPatternResult::isMinOrMax(SPF)) {
1028       computeKnownBits(RHS, Known, Depth + 1, Q);
1029       computeKnownBits(LHS, Known2, Depth + 1, Q);
1030     } else {
1031       computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1032       computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1033     }
1034 
1035     unsigned MaxHighOnes = 0;
1036     unsigned MaxHighZeros = 0;
1037     if (SPF == SPF_SMAX) {
1038       // If both sides are negative, the result is negative.
1039       if (Known.isNegative() && Known2.isNegative())
1040         // We can derive a lower bound on the result by taking the max of the
1041         // leading one bits.
1042         MaxHighOnes =
1043             std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1044       // If either side is non-negative, the result is non-negative.
1045       else if (Known.isNonNegative() || Known2.isNonNegative())
1046         MaxHighZeros = 1;
1047     } else if (SPF == SPF_SMIN) {
1048       // If both sides are non-negative, the result is non-negative.
1049       if (Known.isNonNegative() && Known2.isNonNegative())
1050         // We can derive an upper bound on the result by taking the max of the
1051         // leading zero bits.
1052         MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1053                                 Known2.countMinLeadingZeros());
1054       // If either side is negative, the result is negative.
1055       else if (Known.isNegative() || Known2.isNegative())
1056         MaxHighOnes = 1;
1057     } else if (SPF == SPF_UMAX) {
1058       // We can derive a lower bound on the result by taking the max of the
1059       // leading one bits.
1060       MaxHighOnes =
1061           std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1062     } else if (SPF == SPF_UMIN) {
1063       // We can derive an upper bound on the result by taking the max of the
1064       // leading zero bits.
1065       MaxHighZeros =
1066           std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1067     }
1068 
1069     // Only known if known in both the LHS and RHS.
1070     Known.One &= Known2.One;
1071     Known.Zero &= Known2.Zero;
1072     if (MaxHighOnes > 0)
1073       Known.One.setHighBits(MaxHighOnes);
1074     if (MaxHighZeros > 0)
1075       Known.Zero.setHighBits(MaxHighZeros);
1076     break;
1077   }
1078   case Instruction::FPTrunc:
1079   case Instruction::FPExt:
1080   case Instruction::FPToUI:
1081   case Instruction::FPToSI:
1082   case Instruction::SIToFP:
1083   case Instruction::UIToFP:
1084     break; // Can't work with floating point.
1085   case Instruction::PtrToInt:
1086   case Instruction::IntToPtr:
1087     // Fall through and handle them the same as zext/trunc.
1088     LLVM_FALLTHROUGH;
1089   case Instruction::ZExt:
1090   case Instruction::Trunc: {
1091     Type *SrcTy = I->getOperand(0)->getType();
1092 
1093     unsigned SrcBitWidth;
1094     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1095     // which fall through here.
1096     SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
1097 
1098     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1099     Known = Known.zextOrTrunc(SrcBitWidth);
1100     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1101     Known = Known.zextOrTrunc(BitWidth);
1102     // Any top bits are known to be zero.
1103     if (BitWidth > SrcBitWidth)
1104       Known.Zero.setBitsFrom(SrcBitWidth);
1105     break;
1106   }
1107   case Instruction::BitCast: {
1108     Type *SrcTy = I->getOperand(0)->getType();
1109     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1110         // TODO: For now, not handling conversions like:
1111         // (bitcast i64 %x to <2 x i32>)
1112         !I->getType()->isVectorTy()) {
1113       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1114       break;
1115     }
1116     break;
1117   }
1118   case Instruction::SExt: {
1119     // Compute the bits in the result that are not present in the input.
1120     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1121 
1122     Known = Known.trunc(SrcBitWidth);
1123     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1124     // If the sign bit of the input is known set or clear, then we know the
1125     // top bits of the result.
1126     Known = Known.sext(BitWidth);
1127     break;
1128   }
1129   case Instruction::Shl: {
1130     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1131     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1132     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1133       APInt KZResult = KnownZero << ShiftAmt;
1134       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1135       // If this shift has "nsw" keyword, then the result is either a poison
1136       // value or has the same sign bit as the first operand.
1137       if (NSW && KnownZero.isSignBitSet())
1138         KZResult.setSignBit();
1139       return KZResult;
1140     };
1141 
1142     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1143       APInt KOResult = KnownOne << ShiftAmt;
1144       if (NSW && KnownOne.isSignBitSet())
1145         KOResult.setSignBit();
1146       return KOResult;
1147     };
1148 
1149     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1150     break;
1151   }
1152   case Instruction::LShr: {
1153     // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1154     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1155       APInt KZResult = KnownZero.lshr(ShiftAmt);
1156       // High bits known zero.
1157       KZResult.setHighBits(ShiftAmt);
1158       return KZResult;
1159     };
1160 
1161     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1162       return KnownOne.lshr(ShiftAmt);
1163     };
1164 
1165     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1166     break;
1167   }
1168   case Instruction::AShr: {
1169     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1170     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1171       return KnownZero.ashr(ShiftAmt);
1172     };
1173 
1174     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1175       return KnownOne.ashr(ShiftAmt);
1176     };
1177 
1178     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1179     break;
1180   }
1181   case Instruction::Sub: {
1182     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1183     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1184                            Known, Known2, Depth, Q);
1185     break;
1186   }
1187   case Instruction::Add: {
1188     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1189     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1190                            Known, Known2, Depth, Q);
1191     break;
1192   }
1193   case Instruction::SRem:
1194     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1195       APInt RA = Rem->getValue().abs();
1196       if (RA.isPowerOf2()) {
1197         APInt LowBits = RA - 1;
1198         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1199 
1200         // The low bits of the first operand are unchanged by the srem.
1201         Known.Zero = Known2.Zero & LowBits;
1202         Known.One = Known2.One & LowBits;
1203 
1204         // If the first operand is non-negative or has all low bits zero, then
1205         // the upper bits are all zero.
1206         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1207           Known.Zero |= ~LowBits;
1208 
1209         // If the first operand is negative and not all low bits are zero, then
1210         // the upper bits are all one.
1211         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1212           Known.One |= ~LowBits;
1213 
1214         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1215         break;
1216       }
1217     }
1218 
1219     // The sign bit is the LHS's sign bit, except when the result of the
1220     // remainder is zero.
1221     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1222     // If it's known zero, our sign bit is also zero.
1223     if (Known2.isNonNegative())
1224       Known.makeNonNegative();
1225 
1226     break;
1227   case Instruction::URem: {
1228     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1229       const APInt &RA = Rem->getValue();
1230       if (RA.isPowerOf2()) {
1231         APInt LowBits = (RA - 1);
1232         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1233         Known.Zero |= ~LowBits;
1234         Known.One &= LowBits;
1235         break;
1236       }
1237     }
1238 
1239     // Since the result is less than or equal to either operand, any leading
1240     // zero bits in either operand must also exist in the result.
1241     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1242     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1243 
1244     unsigned Leaders =
1245         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1246     Known.resetAll();
1247     Known.Zero.setHighBits(Leaders);
1248     break;
1249   }
1250 
1251   case Instruction::Alloca: {
1252     const AllocaInst *AI = cast<AllocaInst>(I);
1253     unsigned Align = AI->getAlignment();
1254     if (Align == 0)
1255       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1256 
1257     if (Align > 0)
1258       Known.Zero.setLowBits(countTrailingZeros(Align));
1259     break;
1260   }
1261   case Instruction::GetElementPtr: {
1262     // Analyze all of the subscripts of this getelementptr instruction
1263     // to determine if we can prove known low zero bits.
1264     KnownBits LocalKnown(BitWidth);
1265     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1266     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1267 
1268     gep_type_iterator GTI = gep_type_begin(I);
1269     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1270       Value *Index = I->getOperand(i);
1271       if (StructType *STy = GTI.getStructTypeOrNull()) {
1272         // Handle struct member offset arithmetic.
1273 
1274         // Handle case when index is vector zeroinitializer
1275         Constant *CIndex = cast<Constant>(Index);
1276         if (CIndex->isZeroValue())
1277           continue;
1278 
1279         if (CIndex->getType()->isVectorTy())
1280           Index = CIndex->getSplatValue();
1281 
1282         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1283         const StructLayout *SL = Q.DL.getStructLayout(STy);
1284         uint64_t Offset = SL->getElementOffset(Idx);
1285         TrailZ = std::min<unsigned>(TrailZ,
1286                                     countTrailingZeros(Offset));
1287       } else {
1288         // Handle array index arithmetic.
1289         Type *IndexedTy = GTI.getIndexedType();
1290         if (!IndexedTy->isSized()) {
1291           TrailZ = 0;
1292           break;
1293         }
1294         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1295         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1296         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1297         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1298         TrailZ = std::min(TrailZ,
1299                           unsigned(countTrailingZeros(TypeSize) +
1300                                    LocalKnown.countMinTrailingZeros()));
1301       }
1302     }
1303 
1304     Known.Zero.setLowBits(TrailZ);
1305     break;
1306   }
1307   case Instruction::PHI: {
1308     const PHINode *P = cast<PHINode>(I);
1309     // Handle the case of a simple two-predecessor recurrence PHI.
1310     // There's a lot more that could theoretically be done here, but
1311     // this is sufficient to catch some interesting cases.
1312     if (P->getNumIncomingValues() == 2) {
1313       for (unsigned i = 0; i != 2; ++i) {
1314         Value *L = P->getIncomingValue(i);
1315         Value *R = P->getIncomingValue(!i);
1316         Operator *LU = dyn_cast<Operator>(L);
1317         if (!LU)
1318           continue;
1319         unsigned Opcode = LU->getOpcode();
1320         // Check for operations that have the property that if
1321         // both their operands have low zero bits, the result
1322         // will have low zero bits.
1323         if (Opcode == Instruction::Add ||
1324             Opcode == Instruction::Sub ||
1325             Opcode == Instruction::And ||
1326             Opcode == Instruction::Or ||
1327             Opcode == Instruction::Mul) {
1328           Value *LL = LU->getOperand(0);
1329           Value *LR = LU->getOperand(1);
1330           // Find a recurrence.
1331           if (LL == I)
1332             L = LR;
1333           else if (LR == I)
1334             L = LL;
1335           else
1336             break;
1337           // Ok, we have a PHI of the form L op= R. Check for low
1338           // zero bits.
1339           computeKnownBits(R, Known2, Depth + 1, Q);
1340 
1341           // We need to take the minimum number of known bits
1342           KnownBits Known3(Known);
1343           computeKnownBits(L, Known3, Depth + 1, Q);
1344 
1345           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1346                                          Known3.countMinTrailingZeros()));
1347 
1348           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1349           if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
1350             // If initial value of recurrence is nonnegative, and we are adding
1351             // a nonnegative number with nsw, the result can only be nonnegative
1352             // or poison value regardless of the number of times we execute the
1353             // add in phi recurrence. If initial value is negative and we are
1354             // adding a negative number with nsw, the result can only be
1355             // negative or poison value. Similar arguments apply to sub and mul.
1356             //
1357             // (add non-negative, non-negative) --> non-negative
1358             // (add negative, negative) --> negative
1359             if (Opcode == Instruction::Add) {
1360               if (Known2.isNonNegative() && Known3.isNonNegative())
1361                 Known.makeNonNegative();
1362               else if (Known2.isNegative() && Known3.isNegative())
1363                 Known.makeNegative();
1364             }
1365 
1366             // (sub nsw non-negative, negative) --> non-negative
1367             // (sub nsw negative, non-negative) --> negative
1368             else if (Opcode == Instruction::Sub && LL == I) {
1369               if (Known2.isNonNegative() && Known3.isNegative())
1370                 Known.makeNonNegative();
1371               else if (Known2.isNegative() && Known3.isNonNegative())
1372                 Known.makeNegative();
1373             }
1374 
1375             // (mul nsw non-negative, non-negative) --> non-negative
1376             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1377                      Known3.isNonNegative())
1378               Known.makeNonNegative();
1379           }
1380 
1381           break;
1382         }
1383       }
1384     }
1385 
1386     // Unreachable blocks may have zero-operand PHI nodes.
1387     if (P->getNumIncomingValues() == 0)
1388       break;
1389 
1390     // Otherwise take the unions of the known bit sets of the operands,
1391     // taking conservative care to avoid excessive recursion.
1392     if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1393       // Skip if every incoming value references to ourself.
1394       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1395         break;
1396 
1397       Known.Zero.setAllBits();
1398       Known.One.setAllBits();
1399       for (Value *IncValue : P->incoming_values()) {
1400         // Skip direct self references.
1401         if (IncValue == P) continue;
1402 
1403         Known2 = KnownBits(BitWidth);
1404         // Recurse, but cap the recursion to one level, because we don't
1405         // want to waste time spinning around in loops.
1406         computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1407         Known.Zero &= Known2.Zero;
1408         Known.One &= Known2.One;
1409         // If all bits have been ruled out, there's no need to check
1410         // more operands.
1411         if (!Known.Zero && !Known.One)
1412           break;
1413       }
1414     }
1415     break;
1416   }
1417   case Instruction::Call:
1418   case Instruction::Invoke:
1419     // If range metadata is attached to this call, set known bits from that,
1420     // and then intersect with known bits based on other properties of the
1421     // function.
1422     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1423       computeKnownBitsFromRangeMetadata(*MD, Known);
1424     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1425       computeKnownBits(RV, Known2, Depth + 1, Q);
1426       Known.Zero |= Known2.Zero;
1427       Known.One |= Known2.One;
1428     }
1429     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1430       switch (II->getIntrinsicID()) {
1431       default: break;
1432       case Intrinsic::bitreverse:
1433         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1434         Known.Zero |= Known2.Zero.reverseBits();
1435         Known.One |= Known2.One.reverseBits();
1436         break;
1437       case Intrinsic::bswap:
1438         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1439         Known.Zero |= Known2.Zero.byteSwap();
1440         Known.One |= Known2.One.byteSwap();
1441         break;
1442       case Intrinsic::ctlz: {
1443         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1444         // If we have a known 1, its position is our upper bound.
1445         unsigned PossibleLZ = Known2.One.countLeadingZeros();
1446         // If this call is undefined for 0, the result will be less than 2^n.
1447         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1448           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1449         unsigned LowBits = Log2_32(PossibleLZ)+1;
1450         Known.Zero.setBitsFrom(LowBits);
1451         break;
1452       }
1453       case Intrinsic::cttz: {
1454         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1455         // If we have a known 1, its position is our upper bound.
1456         unsigned PossibleTZ = Known2.One.countTrailingZeros();
1457         // If this call is undefined for 0, the result will be less than 2^n.
1458         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1459           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1460         unsigned LowBits = Log2_32(PossibleTZ)+1;
1461         Known.Zero.setBitsFrom(LowBits);
1462         break;
1463       }
1464       case Intrinsic::ctpop: {
1465         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1466         // We can bound the space the count needs.  Also, bits known to be zero
1467         // can't contribute to the population.
1468         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1469         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1470         Known.Zero.setBitsFrom(LowBits);
1471         // TODO: we could bound KnownOne using the lower bound on the number
1472         // of bits which might be set provided by popcnt KnownOne2.
1473         break;
1474       }
1475       case Intrinsic::x86_sse42_crc32_64_64:
1476         Known.Zero.setBitsFrom(32);
1477         break;
1478       }
1479     }
1480     break;
1481   case Instruction::ExtractElement:
1482     // Look through extract element. At the moment we keep this simple and skip
1483     // tracking the specific element. But at least we might find information
1484     // valid for all elements of the vector (for example if vector is sign
1485     // extended, shifted, etc).
1486     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1487     break;
1488   case Instruction::ExtractValue:
1489     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1490       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1491       if (EVI->getNumIndices() != 1) break;
1492       if (EVI->getIndices()[0] == 0) {
1493         switch (II->getIntrinsicID()) {
1494         default: break;
1495         case Intrinsic::uadd_with_overflow:
1496         case Intrinsic::sadd_with_overflow:
1497           computeKnownBitsAddSub(true, II->getArgOperand(0),
1498                                  II->getArgOperand(1), false, Known, Known2,
1499                                  Depth, Q);
1500           break;
1501         case Intrinsic::usub_with_overflow:
1502         case Intrinsic::ssub_with_overflow:
1503           computeKnownBitsAddSub(false, II->getArgOperand(0),
1504                                  II->getArgOperand(1), false, Known, Known2,
1505                                  Depth, Q);
1506           break;
1507         case Intrinsic::umul_with_overflow:
1508         case Intrinsic::smul_with_overflow:
1509           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1510                               Known, Known2, Depth, Q);
1511           break;
1512         }
1513       }
1514     }
1515   }
1516 }
1517 
1518 /// Determine which bits of V are known to be either zero or one and return
1519 /// them.
1520 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1521   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1522   computeKnownBits(V, Known, Depth, Q);
1523   return Known;
1524 }
1525 
1526 /// Determine which bits of V are known to be either zero or one and return
1527 /// them in the Known bit set.
1528 ///
1529 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1530 /// we cannot optimize based on the assumption that it is zero without changing
1531 /// it to be an explicit zero.  If we don't change it to zero, other code could
1532 /// optimized based on the contradictory assumption that it is non-zero.
1533 /// Because instcombine aggressively folds operations with undef args anyway,
1534 /// this won't lose us code quality.
1535 ///
1536 /// This function is defined on values with integer type, values with pointer
1537 /// type, and vectors of integers.  In the case
1538 /// where V is a vector, known zero, and known one values are the
1539 /// same width as the vector element, and the bit is set only if it is true
1540 /// for all of the elements in the vector.
1541 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1542                       const Query &Q) {
1543   assert(V && "No Value?");
1544   assert(Depth <= MaxDepth && "Limit Search Depth");
1545   unsigned BitWidth = Known.getBitWidth();
1546 
1547   assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1548           V->getType()->isPtrOrPtrVectorTy()) &&
1549          "Not integer or pointer type!");
1550   assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
1551          "V and Known should have same BitWidth");
1552   (void)BitWidth;
1553 
1554   const APInt *C;
1555   if (match(V, m_APInt(C))) {
1556     // We know all of the bits for a scalar constant or a splat vector constant!
1557     Known.One = *C;
1558     Known.Zero = ~Known.One;
1559     return;
1560   }
1561   // Null and aggregate-zero are all-zeros.
1562   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1563     Known.setAllZero();
1564     return;
1565   }
1566   // Handle a constant vector by taking the intersection of the known bits of
1567   // each element.
1568   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1569     // We know that CDS must be a vector of integers. Take the intersection of
1570     // each element.
1571     Known.Zero.setAllBits(); Known.One.setAllBits();
1572     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1573       APInt Elt = CDS->getElementAsAPInt(i);
1574       Known.Zero &= ~Elt;
1575       Known.One &= Elt;
1576     }
1577     return;
1578   }
1579 
1580   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1581     // We know that CV must be a vector of integers. Take the intersection of
1582     // each element.
1583     Known.Zero.setAllBits(); Known.One.setAllBits();
1584     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1585       Constant *Element = CV->getAggregateElement(i);
1586       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1587       if (!ElementCI) {
1588         Known.resetAll();
1589         return;
1590       }
1591       const APInt &Elt = ElementCI->getValue();
1592       Known.Zero &= ~Elt;
1593       Known.One &= Elt;
1594     }
1595     return;
1596   }
1597 
1598   // Start out not knowing anything.
1599   Known.resetAll();
1600 
1601   // We can't imply anything about undefs.
1602   if (isa<UndefValue>(V))
1603     return;
1604 
1605   // There's no point in looking through other users of ConstantData for
1606   // assumptions.  Confirm that we've handled them all.
1607   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1608 
1609   // Limit search depth.
1610   // All recursive calls that increase depth must come after this.
1611   if (Depth == MaxDepth)
1612     return;
1613 
1614   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1615   // the bits of its aliasee.
1616   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1617     if (!GA->isInterposable())
1618       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1619     return;
1620   }
1621 
1622   if (const Operator *I = dyn_cast<Operator>(V))
1623     computeKnownBitsFromOperator(I, Known, Depth, Q);
1624 
1625   // Aligned pointers have trailing zeros - refine Known.Zero set
1626   if (V->getType()->isPointerTy()) {
1627     unsigned Align = V->getPointerAlignment(Q.DL);
1628     if (Align)
1629       Known.Zero.setLowBits(countTrailingZeros(Align));
1630   }
1631 
1632   // computeKnownBitsFromAssume strictly refines Known.
1633   // Therefore, we run them after computeKnownBitsFromOperator.
1634 
1635   // Check whether a nearby assume intrinsic can determine some known bits.
1636   computeKnownBitsFromAssume(V, Known, Depth, Q);
1637 
1638   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1639 }
1640 
1641 /// Return true if the given value is known to have exactly one
1642 /// bit set when defined. For vectors return true if every element is known to
1643 /// be a power of two when defined. Supports values with integer or pointer
1644 /// types and vectors of integers.
1645 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1646                             const Query &Q) {
1647   assert(Depth <= MaxDepth && "Limit Search Depth");
1648 
1649   // Attempt to match against constants.
1650   if (OrZero && match(V, m_Power2OrZero()))
1651       return true;
1652   if (match(V, m_Power2()))
1653       return true;
1654 
1655   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1656   // it is shifted off the end then the result is undefined.
1657   if (match(V, m_Shl(m_One(), m_Value())))
1658     return true;
1659 
1660   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1661   // the bottom.  If it is shifted off the bottom then the result is undefined.
1662   if (match(V, m_LShr(m_SignMask(), m_Value())))
1663     return true;
1664 
1665   // The remaining tests are all recursive, so bail out if we hit the limit.
1666   if (Depth++ == MaxDepth)
1667     return false;
1668 
1669   Value *X = nullptr, *Y = nullptr;
1670   // A shift left or a logical shift right of a power of two is a power of two
1671   // or zero.
1672   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1673                  match(V, m_LShr(m_Value(X), m_Value()))))
1674     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1675 
1676   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1677     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1678 
1679   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1680     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1681            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1682 
1683   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1684     // A power of two and'd with anything is a power of two or zero.
1685     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1686         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1687       return true;
1688     // X & (-X) is always a power of two or zero.
1689     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1690       return true;
1691     return false;
1692   }
1693 
1694   // Adding a power-of-two or zero to the same power-of-two or zero yields
1695   // either the original power-of-two, a larger power-of-two or zero.
1696   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1697     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1698     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1699       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1700           match(X, m_And(m_Value(), m_Specific(Y))))
1701         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1702           return true;
1703       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1704           match(Y, m_And(m_Value(), m_Specific(X))))
1705         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1706           return true;
1707 
1708       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1709       KnownBits LHSBits(BitWidth);
1710       computeKnownBits(X, LHSBits, Depth, Q);
1711 
1712       KnownBits RHSBits(BitWidth);
1713       computeKnownBits(Y, RHSBits, Depth, Q);
1714       // If i8 V is a power of two or zero:
1715       //  ZeroBits: 1 1 1 0 1 1 1 1
1716       // ~ZeroBits: 0 0 0 1 0 0 0 0
1717       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1718         // If OrZero isn't set, we cannot give back a zero result.
1719         // Make sure either the LHS or RHS has a bit set.
1720         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1721           return true;
1722     }
1723   }
1724 
1725   // An exact divide or right shift can only shift off zero bits, so the result
1726   // is a power of two only if the first operand is a power of two and not
1727   // copying a sign bit (sdiv int_min, 2).
1728   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1729       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1730     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1731                                   Depth, Q);
1732   }
1733 
1734   return false;
1735 }
1736 
1737 /// \brief Test whether a GEP's result is known to be non-null.
1738 ///
1739 /// Uses properties inherent in a GEP to try to determine whether it is known
1740 /// to be non-null.
1741 ///
1742 /// Currently this routine does not support vector GEPs.
1743 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1744                               const Query &Q) {
1745   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1746     return false;
1747 
1748   // FIXME: Support vector-GEPs.
1749   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1750 
1751   // If the base pointer is non-null, we cannot walk to a null address with an
1752   // inbounds GEP in address space zero.
1753   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1754     return true;
1755 
1756   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1757   // If so, then the GEP cannot produce a null pointer, as doing so would
1758   // inherently violate the inbounds contract within address space zero.
1759   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1760        GTI != GTE; ++GTI) {
1761     // Struct types are easy -- they must always be indexed by a constant.
1762     if (StructType *STy = GTI.getStructTypeOrNull()) {
1763       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1764       unsigned ElementIdx = OpC->getZExtValue();
1765       const StructLayout *SL = Q.DL.getStructLayout(STy);
1766       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1767       if (ElementOffset > 0)
1768         return true;
1769       continue;
1770     }
1771 
1772     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1773     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1774       continue;
1775 
1776     // Fast path the constant operand case both for efficiency and so we don't
1777     // increment Depth when just zipping down an all-constant GEP.
1778     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1779       if (!OpC->isZero())
1780         return true;
1781       continue;
1782     }
1783 
1784     // We post-increment Depth here because while isKnownNonZero increments it
1785     // as well, when we pop back up that increment won't persist. We don't want
1786     // to recurse 10k times just because we have 10k GEP operands. We don't
1787     // bail completely out because we want to handle constant GEPs regardless
1788     // of depth.
1789     if (Depth++ >= MaxDepth)
1790       continue;
1791 
1792     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1793       return true;
1794   }
1795 
1796   return false;
1797 }
1798 
1799 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1800                                                   const Instruction *CtxI,
1801                                                   const DominatorTree *DT) {
1802   assert(V->getType()->isPointerTy() && "V must be pointer type");
1803   assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1804 
1805   if (!CtxI || !DT)
1806     return false;
1807 
1808   unsigned NumUsesExplored = 0;
1809   for (auto *U : V->users()) {
1810     // Avoid massive lists
1811     if (NumUsesExplored >= DomConditionsMaxUses)
1812       break;
1813     NumUsesExplored++;
1814 
1815     // If the value is used as an argument to a call or invoke, then argument
1816     // attributes may provide an answer about null-ness.
1817     if (auto CS = ImmutableCallSite(U))
1818       if (auto *CalledFunc = CS.getCalledFunction())
1819         for (const Argument &Arg : CalledFunc->args())
1820           if (CS.getArgOperand(Arg.getArgNo()) == V &&
1821               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1822             return true;
1823 
1824     // Consider only compare instructions uniquely controlling a branch
1825     CmpInst::Predicate Pred;
1826     if (!match(const_cast<User *>(U),
1827                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1828         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1829       continue;
1830 
1831     for (auto *CmpU : U->users()) {
1832       if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
1833         assert(BI->isConditional() && "uses a comparison!");
1834 
1835         BasicBlock *NonNullSuccessor =
1836             BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1837         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1838         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1839           return true;
1840       } else if (Pred == ICmpInst::ICMP_NE &&
1841                  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1842                  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1843         return true;
1844       }
1845     }
1846   }
1847 
1848   return false;
1849 }
1850 
1851 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1852 /// ensure that the value it's attached to is never Value?  'RangeType' is
1853 /// is the type of the value described by the range.
1854 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1855   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1856   assert(NumRanges >= 1);
1857   for (unsigned i = 0; i < NumRanges; ++i) {
1858     ConstantInt *Lower =
1859         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1860     ConstantInt *Upper =
1861         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1862     ConstantRange Range(Lower->getValue(), Upper->getValue());
1863     if (Range.contains(Value))
1864       return false;
1865   }
1866   return true;
1867 }
1868 
1869 /// Return true if the given value is known to be non-zero when defined. For
1870 /// vectors, return true if every element is known to be non-zero when
1871 /// defined. For pointers, if the context instruction and dominator tree are
1872 /// specified, perform context-sensitive analysis and return true if the
1873 /// pointer couldn't possibly be null at the specified instruction.
1874 /// Supports values with integer or pointer type and vectors of integers.
1875 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1876   if (auto *C = dyn_cast<Constant>(V)) {
1877     if (C->isNullValue())
1878       return false;
1879     if (isa<ConstantInt>(C))
1880       // Must be non-zero due to null test above.
1881       return true;
1882 
1883     // For constant vectors, check that all elements are undefined or known
1884     // non-zero to determine that the whole vector is known non-zero.
1885     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1886       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1887         Constant *Elt = C->getAggregateElement(i);
1888         if (!Elt || Elt->isNullValue())
1889           return false;
1890         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1891           return false;
1892       }
1893       return true;
1894     }
1895 
1896     // A global variable in address space 0 is non null unless extern weak
1897     // or an absolute symbol reference. Other address spaces may have null as a
1898     // valid address for a global, so we can't assume anything.
1899     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1900       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1901           GV->getType()->getAddressSpace() == 0)
1902         return true;
1903     } else
1904       return false;
1905   }
1906 
1907   if (auto *I = dyn_cast<Instruction>(V)) {
1908     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1909       // If the possible ranges don't contain zero, then the value is
1910       // definitely non-zero.
1911       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1912         const APInt ZeroValue(Ty->getBitWidth(), 0);
1913         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1914           return true;
1915       }
1916     }
1917   }
1918 
1919   // Check for pointer simplifications.
1920   if (V->getType()->isPointerTy()) {
1921     // Alloca never returns null, malloc might.
1922     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
1923       return true;
1924 
1925     // A byval, inalloca, or nonnull argument is never null.
1926     if (const Argument *A = dyn_cast<Argument>(V))
1927       if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
1928         return true;
1929 
1930     // A Load tagged with nonnull metadata is never null.
1931     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1932       if (LI->getMetadata(LLVMContext::MD_nonnull))
1933         return true;
1934 
1935     if (auto CS = ImmutableCallSite(V))
1936       if (CS.isReturnNonNull())
1937         return true;
1938   }
1939 
1940   // The remaining tests are all recursive, so bail out if we hit the limit.
1941   if (Depth++ >= MaxDepth)
1942     return false;
1943 
1944   // Check for recursive pointer simplifications.
1945   if (V->getType()->isPointerTy()) {
1946     if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
1947       return true;
1948 
1949     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1950       if (isGEPKnownNonNull(GEP, Depth, Q))
1951         return true;
1952   }
1953 
1954   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1955 
1956   // X | Y != 0 if X != 0 or Y != 0.
1957   Value *X = nullptr, *Y = nullptr;
1958   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1959     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1960 
1961   // ext X != 0 if X != 0.
1962   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1963     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1964 
1965   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1966   // if the lowest bit is shifted off the end.
1967   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1968     // shl nuw can't remove any non-zero bits.
1969     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1970     if (BO->hasNoUnsignedWrap())
1971       return isKnownNonZero(X, Depth, Q);
1972 
1973     KnownBits Known(BitWidth);
1974     computeKnownBits(X, Known, Depth, Q);
1975     if (Known.One[0])
1976       return true;
1977   }
1978   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1979   // defined if the sign bit is shifted off the end.
1980   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1981     // shr exact can only shift out zero bits.
1982     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1983     if (BO->isExact())
1984       return isKnownNonZero(X, Depth, Q);
1985 
1986     KnownBits Known = computeKnownBits(X, Depth, Q);
1987     if (Known.isNegative())
1988       return true;
1989 
1990     // If the shifter operand is a constant, and all of the bits shifted
1991     // out are known to be zero, and X is known non-zero then at least one
1992     // non-zero bit must remain.
1993     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1994       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1995       // Is there a known one in the portion not shifted out?
1996       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
1997         return true;
1998       // Are all the bits to be shifted out known zero?
1999       if (Known.countMinTrailingZeros() >= ShiftVal)
2000         return isKnownNonZero(X, Depth, Q);
2001     }
2002   }
2003   // div exact can only produce a zero if the dividend is zero.
2004   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2005     return isKnownNonZero(X, Depth, Q);
2006   }
2007   // X + Y.
2008   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2009     KnownBits XKnown = computeKnownBits(X, Depth, Q);
2010     KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2011 
2012     // If X and Y are both non-negative (as signed values) then their sum is not
2013     // zero unless both X and Y are zero.
2014     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2015       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2016         return true;
2017 
2018     // If X and Y are both negative (as signed values) then their sum is not
2019     // zero unless both X and Y equal INT_MIN.
2020     if (XKnown.isNegative() && YKnown.isNegative()) {
2021       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2022       // The sign bit of X is set.  If some other bit is set then X is not equal
2023       // to INT_MIN.
2024       if (XKnown.One.intersects(Mask))
2025         return true;
2026       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2027       // to INT_MIN.
2028       if (YKnown.One.intersects(Mask))
2029         return true;
2030     }
2031 
2032     // The sum of a non-negative number and a power of two is not zero.
2033     if (XKnown.isNonNegative() &&
2034         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2035       return true;
2036     if (YKnown.isNonNegative() &&
2037         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2038       return true;
2039   }
2040   // X * Y.
2041   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2042     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2043     // If X and Y are non-zero then so is X * Y as long as the multiplication
2044     // does not overflow.
2045     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
2046         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2047       return true;
2048   }
2049   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2050   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2051     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2052         isKnownNonZero(SI->getFalseValue(), Depth, Q))
2053       return true;
2054   }
2055   // PHI
2056   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2057     // Try and detect a recurrence that monotonically increases from a
2058     // starting value, as these are common as induction variables.
2059     if (PN->getNumIncomingValues() == 2) {
2060       Value *Start = PN->getIncomingValue(0);
2061       Value *Induction = PN->getIncomingValue(1);
2062       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2063         std::swap(Start, Induction);
2064       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2065         if (!C->isZero() && !C->isNegative()) {
2066           ConstantInt *X;
2067           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2068                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2069               !X->isNegative())
2070             return true;
2071         }
2072       }
2073     }
2074     // Check if all incoming values are non-zero constant.
2075     bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2076       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2077     });
2078     if (AllNonZeroConstants)
2079       return true;
2080   }
2081 
2082   KnownBits Known(BitWidth);
2083   computeKnownBits(V, Known, Depth, Q);
2084   return Known.One != 0;
2085 }
2086 
2087 /// Return true if V2 == V1 + X, where X is known non-zero.
2088 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2089   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2090   if (!BO || BO->getOpcode() != Instruction::Add)
2091     return false;
2092   Value *Op = nullptr;
2093   if (V2 == BO->getOperand(0))
2094     Op = BO->getOperand(1);
2095   else if (V2 == BO->getOperand(1))
2096     Op = BO->getOperand(0);
2097   else
2098     return false;
2099   return isKnownNonZero(Op, 0, Q);
2100 }
2101 
2102 /// Return true if it is known that V1 != V2.
2103 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2104   if (V1 == V2)
2105     return false;
2106   if (V1->getType() != V2->getType())
2107     // We can't look through casts yet.
2108     return false;
2109   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2110     return true;
2111 
2112   if (V1->getType()->isIntOrIntVectorTy()) {
2113     // Are any known bits in V1 contradictory to known bits in V2? If V1
2114     // has a known zero where V2 has a known one, they must not be equal.
2115     KnownBits Known1 = computeKnownBits(V1, 0, Q);
2116     KnownBits Known2 = computeKnownBits(V2, 0, Q);
2117 
2118     if (Known1.Zero.intersects(Known2.One) ||
2119         Known2.Zero.intersects(Known1.One))
2120       return true;
2121   }
2122   return false;
2123 }
2124 
2125 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2126 /// simplify operations downstream. Mask is known to be zero for bits that V
2127 /// cannot have.
2128 ///
2129 /// This function is defined on values with integer type, values with pointer
2130 /// type, and vectors of integers.  In the case
2131 /// where V is a vector, the mask, known zero, and known one values are the
2132 /// same width as the vector element, and the bit is set only if it is true
2133 /// for all of the elements in the vector.
2134 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2135                        const Query &Q) {
2136   KnownBits Known(Mask.getBitWidth());
2137   computeKnownBits(V, Known, Depth, Q);
2138   return Mask.isSubsetOf(Known.Zero);
2139 }
2140 
2141 /// For vector constants, loop over the elements and find the constant with the
2142 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2143 /// or if any element was not analyzed; otherwise, return the count for the
2144 /// element with the minimum number of sign bits.
2145 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2146                                                  unsigned TyBits) {
2147   const auto *CV = dyn_cast<Constant>(V);
2148   if (!CV || !CV->getType()->isVectorTy())
2149     return 0;
2150 
2151   unsigned MinSignBits = TyBits;
2152   unsigned NumElts = CV->getType()->getVectorNumElements();
2153   for (unsigned i = 0; i != NumElts; ++i) {
2154     // If we find a non-ConstantInt, bail out.
2155     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2156     if (!Elt)
2157       return 0;
2158 
2159     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2160   }
2161 
2162   return MinSignBits;
2163 }
2164 
2165 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2166                                        const Query &Q);
2167 
2168 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2169                                    const Query &Q) {
2170   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2171   assert(Result > 0 && "At least one sign bit needs to be present!");
2172   return Result;
2173 }
2174 
2175 /// Return the number of times the sign bit of the register is replicated into
2176 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2177 /// (itself), but other cases can give us information. For example, immediately
2178 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2179 /// other, so we return 3. For vectors, return the number of sign bits for the
2180 /// vector element with the mininum number of known sign bits.
2181 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2182                                        const Query &Q) {
2183   assert(Depth <= MaxDepth && "Limit Search Depth");
2184 
2185   // We return the minimum number of sign bits that are guaranteed to be present
2186   // in V, so for undef we have to conservatively return 1.  We don't have the
2187   // same behavior for poison though -- that's a FIXME today.
2188 
2189   unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2190   unsigned Tmp, Tmp2;
2191   unsigned FirstAnswer = 1;
2192 
2193   // Note that ConstantInt is handled by the general computeKnownBits case
2194   // below.
2195 
2196   if (Depth == MaxDepth)
2197     return 1;  // Limit search depth.
2198 
2199   const Operator *U = dyn_cast<Operator>(V);
2200   switch (Operator::getOpcode(V)) {
2201   default: break;
2202   case Instruction::SExt:
2203     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2204     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2205 
2206   case Instruction::SDiv: {
2207     const APInt *Denominator;
2208     // sdiv X, C -> adds log(C) sign bits.
2209     if (match(U->getOperand(1), m_APInt(Denominator))) {
2210 
2211       // Ignore non-positive denominator.
2212       if (!Denominator->isStrictlyPositive())
2213         break;
2214 
2215       // Calculate the incoming numerator bits.
2216       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2217 
2218       // Add floor(log(C)) bits to the numerator bits.
2219       return std::min(TyBits, NumBits + Denominator->logBase2());
2220     }
2221     break;
2222   }
2223 
2224   case Instruction::SRem: {
2225     const APInt *Denominator;
2226     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2227     // positive constant.  This let us put a lower bound on the number of sign
2228     // bits.
2229     if (match(U->getOperand(1), m_APInt(Denominator))) {
2230 
2231       // Ignore non-positive denominator.
2232       if (!Denominator->isStrictlyPositive())
2233         break;
2234 
2235       // Calculate the incoming numerator bits. SRem by a positive constant
2236       // can't lower the number of sign bits.
2237       unsigned NumrBits =
2238           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2239 
2240       // Calculate the leading sign bit constraints by examining the
2241       // denominator.  Given that the denominator is positive, there are two
2242       // cases:
2243       //
2244       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2245       //     (1 << ceilLogBase2(C)).
2246       //
2247       //  2. the numerator is negative.  Then the result range is (-C,0] and
2248       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2249       //
2250       // Thus a lower bound on the number of sign bits is `TyBits -
2251       // ceilLogBase2(C)`.
2252 
2253       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2254       return std::max(NumrBits, ResBits);
2255     }
2256     break;
2257   }
2258 
2259   case Instruction::AShr: {
2260     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2261     // ashr X, C   -> adds C sign bits.  Vectors too.
2262     const APInt *ShAmt;
2263     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2264       if (ShAmt->uge(TyBits))
2265         break;  // Bad shift.
2266       unsigned ShAmtLimited = ShAmt->getZExtValue();
2267       Tmp += ShAmtLimited;
2268       if (Tmp > TyBits) Tmp = TyBits;
2269     }
2270     return Tmp;
2271   }
2272   case Instruction::Shl: {
2273     const APInt *ShAmt;
2274     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2275       // shl destroys sign bits.
2276       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2277       if (ShAmt->uge(TyBits) ||      // Bad shift.
2278           ShAmt->uge(Tmp)) break;    // Shifted all sign bits out.
2279       Tmp2 = ShAmt->getZExtValue();
2280       return Tmp - Tmp2;
2281     }
2282     break;
2283   }
2284   case Instruction::And:
2285   case Instruction::Or:
2286   case Instruction::Xor:    // NOT is handled here.
2287     // Logical binary ops preserve the number of sign bits at the worst.
2288     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2289     if (Tmp != 1) {
2290       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2291       FirstAnswer = std::min(Tmp, Tmp2);
2292       // We computed what we know about the sign bits as our first
2293       // answer. Now proceed to the generic code that uses
2294       // computeKnownBits, and pick whichever answer is better.
2295     }
2296     break;
2297 
2298   case Instruction::Select:
2299     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2300     if (Tmp == 1) return 1;  // Early out.
2301     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2302     return std::min(Tmp, Tmp2);
2303 
2304   case Instruction::Add:
2305     // Add can have at most one carry bit.  Thus we know that the output
2306     // is, at worst, one more bit than the inputs.
2307     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2308     if (Tmp == 1) return 1;  // Early out.
2309 
2310     // Special case decrementing a value (ADD X, -1):
2311     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2312       if (CRHS->isAllOnesValue()) {
2313         KnownBits Known(TyBits);
2314         computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2315 
2316         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2317         // sign bits set.
2318         if ((Known.Zero | 1).isAllOnesValue())
2319           return TyBits;
2320 
2321         // If we are subtracting one from a positive number, there is no carry
2322         // out of the result.
2323         if (Known.isNonNegative())
2324           return Tmp;
2325       }
2326 
2327     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2328     if (Tmp2 == 1) return 1;
2329     return std::min(Tmp, Tmp2)-1;
2330 
2331   case Instruction::Sub:
2332     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2333     if (Tmp2 == 1) return 1;
2334 
2335     // Handle NEG.
2336     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2337       if (CLHS->isNullValue()) {
2338         KnownBits Known(TyBits);
2339         computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2340         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2341         // sign bits set.
2342         if ((Known.Zero | 1).isAllOnesValue())
2343           return TyBits;
2344 
2345         // If the input is known to be positive (the sign bit is known clear),
2346         // the output of the NEG has the same number of sign bits as the input.
2347         if (Known.isNonNegative())
2348           return Tmp2;
2349 
2350         // Otherwise, we treat this like a SUB.
2351       }
2352 
2353     // Sub can have at most one carry bit.  Thus we know that the output
2354     // is, at worst, one more bit than the inputs.
2355     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2356     if (Tmp == 1) return 1;  // Early out.
2357     return std::min(Tmp, Tmp2)-1;
2358 
2359   case Instruction::Mul: {
2360     // The output of the Mul can be at most twice the valid bits in the inputs.
2361     unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2362     if (SignBitsOp0 == 1) return 1;  // Early out.
2363     unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2364     if (SignBitsOp1 == 1) return 1;
2365     unsigned OutValidBits =
2366         (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2367     return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2368   }
2369 
2370   case Instruction::PHI: {
2371     const PHINode *PN = cast<PHINode>(U);
2372     unsigned NumIncomingValues = PN->getNumIncomingValues();
2373     // Don't analyze large in-degree PHIs.
2374     if (NumIncomingValues > 4) break;
2375     // Unreachable blocks may have zero-operand PHI nodes.
2376     if (NumIncomingValues == 0) break;
2377 
2378     // Take the minimum of all incoming values.  This can't infinitely loop
2379     // because of our depth threshold.
2380     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2381     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2382       if (Tmp == 1) return Tmp;
2383       Tmp = std::min(
2384           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2385     }
2386     return Tmp;
2387   }
2388 
2389   case Instruction::Trunc:
2390     // FIXME: it's tricky to do anything useful for this, but it is an important
2391     // case for targets like X86.
2392     break;
2393 
2394   case Instruction::ExtractElement:
2395     // Look through extract element. At the moment we keep this simple and skip
2396     // tracking the specific element. But at least we might find information
2397     // valid for all elements of the vector (for example if vector is sign
2398     // extended, shifted, etc).
2399     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2400   }
2401 
2402   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2403   // use this information.
2404 
2405   // If we can examine all elements of a vector constant successfully, we're
2406   // done (we can't do any better than that). If not, keep trying.
2407   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2408     return VecSignBits;
2409 
2410   KnownBits Known(TyBits);
2411   computeKnownBits(V, Known, Depth, Q);
2412 
2413   // If we know that the sign bit is either zero or one, determine the number of
2414   // identical bits in the top of the input value.
2415   return std::max(FirstAnswer, Known.countMinSignBits());
2416 }
2417 
2418 /// This function computes the integer multiple of Base that equals V.
2419 /// If successful, it returns true and returns the multiple in
2420 /// Multiple. If unsuccessful, it returns false. It looks
2421 /// through SExt instructions only if LookThroughSExt is true.
2422 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2423                            bool LookThroughSExt, unsigned Depth) {
2424   const unsigned MaxDepth = 6;
2425 
2426   assert(V && "No Value?");
2427   assert(Depth <= MaxDepth && "Limit Search Depth");
2428   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2429 
2430   Type *T = V->getType();
2431 
2432   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2433 
2434   if (Base == 0)
2435     return false;
2436 
2437   if (Base == 1) {
2438     Multiple = V;
2439     return true;
2440   }
2441 
2442   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2443   Constant *BaseVal = ConstantInt::get(T, Base);
2444   if (CO && CO == BaseVal) {
2445     // Multiple is 1.
2446     Multiple = ConstantInt::get(T, 1);
2447     return true;
2448   }
2449 
2450   if (CI && CI->getZExtValue() % Base == 0) {
2451     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2452     return true;
2453   }
2454 
2455   if (Depth == MaxDepth) return false;  // Limit search depth.
2456 
2457   Operator *I = dyn_cast<Operator>(V);
2458   if (!I) return false;
2459 
2460   switch (I->getOpcode()) {
2461   default: break;
2462   case Instruction::SExt:
2463     if (!LookThroughSExt) return false;
2464     // otherwise fall through to ZExt
2465     LLVM_FALLTHROUGH;
2466   case Instruction::ZExt:
2467     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2468                            LookThroughSExt, Depth+1);
2469   case Instruction::Shl:
2470   case Instruction::Mul: {
2471     Value *Op0 = I->getOperand(0);
2472     Value *Op1 = I->getOperand(1);
2473 
2474     if (I->getOpcode() == Instruction::Shl) {
2475       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2476       if (!Op1CI) return false;
2477       // Turn Op0 << Op1 into Op0 * 2^Op1
2478       APInt Op1Int = Op1CI->getValue();
2479       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2480       APInt API(Op1Int.getBitWidth(), 0);
2481       API.setBit(BitToSet);
2482       Op1 = ConstantInt::get(V->getContext(), API);
2483     }
2484 
2485     Value *Mul0 = nullptr;
2486     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2487       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2488         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2489           if (Op1C->getType()->getPrimitiveSizeInBits() <
2490               MulC->getType()->getPrimitiveSizeInBits())
2491             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2492           if (Op1C->getType()->getPrimitiveSizeInBits() >
2493               MulC->getType()->getPrimitiveSizeInBits())
2494             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2495 
2496           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2497           Multiple = ConstantExpr::getMul(MulC, Op1C);
2498           return true;
2499         }
2500 
2501       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2502         if (Mul0CI->getValue() == 1) {
2503           // V == Base * Op1, so return Op1
2504           Multiple = Op1;
2505           return true;
2506         }
2507     }
2508 
2509     Value *Mul1 = nullptr;
2510     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2511       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2512         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2513           if (Op0C->getType()->getPrimitiveSizeInBits() <
2514               MulC->getType()->getPrimitiveSizeInBits())
2515             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2516           if (Op0C->getType()->getPrimitiveSizeInBits() >
2517               MulC->getType()->getPrimitiveSizeInBits())
2518             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2519 
2520           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2521           Multiple = ConstantExpr::getMul(MulC, Op0C);
2522           return true;
2523         }
2524 
2525       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2526         if (Mul1CI->getValue() == 1) {
2527           // V == Base * Op0, so return Op0
2528           Multiple = Op0;
2529           return true;
2530         }
2531     }
2532   }
2533   }
2534 
2535   // We could not determine if V is a multiple of Base.
2536   return false;
2537 }
2538 
2539 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2540                                             const TargetLibraryInfo *TLI) {
2541   const Function *F = ICS.getCalledFunction();
2542   if (!F)
2543     return Intrinsic::not_intrinsic;
2544 
2545   if (F->isIntrinsic())
2546     return F->getIntrinsicID();
2547 
2548   if (!TLI)
2549     return Intrinsic::not_intrinsic;
2550 
2551   LibFunc Func;
2552   // We're going to make assumptions on the semantics of the functions, check
2553   // that the target knows that it's available in this environment and it does
2554   // not have local linkage.
2555   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2556     return Intrinsic::not_intrinsic;
2557 
2558   if (!ICS.onlyReadsMemory())
2559     return Intrinsic::not_intrinsic;
2560 
2561   // Otherwise check if we have a call to a function that can be turned into a
2562   // vector intrinsic.
2563   switch (Func) {
2564   default:
2565     break;
2566   case LibFunc_sin:
2567   case LibFunc_sinf:
2568   case LibFunc_sinl:
2569     return Intrinsic::sin;
2570   case LibFunc_cos:
2571   case LibFunc_cosf:
2572   case LibFunc_cosl:
2573     return Intrinsic::cos;
2574   case LibFunc_exp:
2575   case LibFunc_expf:
2576   case LibFunc_expl:
2577     return Intrinsic::exp;
2578   case LibFunc_exp2:
2579   case LibFunc_exp2f:
2580   case LibFunc_exp2l:
2581     return Intrinsic::exp2;
2582   case LibFunc_log:
2583   case LibFunc_logf:
2584   case LibFunc_logl:
2585     return Intrinsic::log;
2586   case LibFunc_log10:
2587   case LibFunc_log10f:
2588   case LibFunc_log10l:
2589     return Intrinsic::log10;
2590   case LibFunc_log2:
2591   case LibFunc_log2f:
2592   case LibFunc_log2l:
2593     return Intrinsic::log2;
2594   case LibFunc_fabs:
2595   case LibFunc_fabsf:
2596   case LibFunc_fabsl:
2597     return Intrinsic::fabs;
2598   case LibFunc_fmin:
2599   case LibFunc_fminf:
2600   case LibFunc_fminl:
2601     return Intrinsic::minnum;
2602   case LibFunc_fmax:
2603   case LibFunc_fmaxf:
2604   case LibFunc_fmaxl:
2605     return Intrinsic::maxnum;
2606   case LibFunc_copysign:
2607   case LibFunc_copysignf:
2608   case LibFunc_copysignl:
2609     return Intrinsic::copysign;
2610   case LibFunc_floor:
2611   case LibFunc_floorf:
2612   case LibFunc_floorl:
2613     return Intrinsic::floor;
2614   case LibFunc_ceil:
2615   case LibFunc_ceilf:
2616   case LibFunc_ceill:
2617     return Intrinsic::ceil;
2618   case LibFunc_trunc:
2619   case LibFunc_truncf:
2620   case LibFunc_truncl:
2621     return Intrinsic::trunc;
2622   case LibFunc_rint:
2623   case LibFunc_rintf:
2624   case LibFunc_rintl:
2625     return Intrinsic::rint;
2626   case LibFunc_nearbyint:
2627   case LibFunc_nearbyintf:
2628   case LibFunc_nearbyintl:
2629     return Intrinsic::nearbyint;
2630   case LibFunc_round:
2631   case LibFunc_roundf:
2632   case LibFunc_roundl:
2633     return Intrinsic::round;
2634   case LibFunc_pow:
2635   case LibFunc_powf:
2636   case LibFunc_powl:
2637     return Intrinsic::pow;
2638   case LibFunc_sqrt:
2639   case LibFunc_sqrtf:
2640   case LibFunc_sqrtl:
2641     return Intrinsic::sqrt;
2642   }
2643 
2644   return Intrinsic::not_intrinsic;
2645 }
2646 
2647 /// Return true if we can prove that the specified FP value is never equal to
2648 /// -0.0.
2649 ///
2650 /// NOTE: this function will need to be revisited when we support non-default
2651 /// rounding modes!
2652 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2653                                 unsigned Depth) {
2654   if (auto *CFP = dyn_cast<ConstantFP>(V))
2655     return !CFP->getValueAPF().isNegZero();
2656 
2657   // Limit search depth.
2658   if (Depth == MaxDepth)
2659     return false;
2660 
2661   auto *Op = dyn_cast<Operator>(V);
2662   if (!Op)
2663     return false;
2664 
2665   // Check if the nsz fast-math flag is set.
2666   if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2667     if (FPO->hasNoSignedZeros())
2668       return true;
2669 
2670   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2671   if (match(Op, m_FAdd(m_Value(), m_Zero())))
2672     return true;
2673 
2674   // sitofp and uitofp turn into +0.0 for zero.
2675   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2676     return true;
2677 
2678   if (auto *Call = dyn_cast<CallInst>(Op)) {
2679     Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2680     switch (IID) {
2681     default:
2682       break;
2683     // sqrt(-0.0) = -0.0, no other negative results are possible.
2684     case Intrinsic::sqrt:
2685       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2686     // fabs(x) != -0.0
2687     case Intrinsic::fabs:
2688       return true;
2689     }
2690   }
2691 
2692   return false;
2693 }
2694 
2695 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2696 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2697 /// bit despite comparing equal.
2698 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2699                                             const TargetLibraryInfo *TLI,
2700                                             bool SignBitOnly,
2701                                             unsigned Depth) {
2702   // TODO: This function does not do the right thing when SignBitOnly is true
2703   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2704   // which flips the sign bits of NaNs.  See
2705   // https://llvm.org/bugs/show_bug.cgi?id=31702.
2706 
2707   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2708     return !CFP->getValueAPF().isNegative() ||
2709            (!SignBitOnly && CFP->getValueAPF().isZero());
2710   }
2711 
2712   if (Depth == MaxDepth)
2713     return false; // Limit search depth.
2714 
2715   const Operator *I = dyn_cast<Operator>(V);
2716   if (!I)
2717     return false;
2718 
2719   switch (I->getOpcode()) {
2720   default:
2721     break;
2722   // Unsigned integers are always nonnegative.
2723   case Instruction::UIToFP:
2724     return true;
2725   case Instruction::FMul:
2726     // x*x is always non-negative or a NaN.
2727     if (I->getOperand(0) == I->getOperand(1) &&
2728         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2729       return true;
2730 
2731     LLVM_FALLTHROUGH;
2732   case Instruction::FAdd:
2733   case Instruction::FDiv:
2734   case Instruction::FRem:
2735     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2736                                            Depth + 1) &&
2737            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2738                                            Depth + 1);
2739   case Instruction::Select:
2740     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2741                                            Depth + 1) &&
2742            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2743                                            Depth + 1);
2744   case Instruction::FPExt:
2745   case Instruction::FPTrunc:
2746     // Widening/narrowing never change sign.
2747     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2748                                            Depth + 1);
2749   case Instruction::Call:
2750     const auto *CI = cast<CallInst>(I);
2751     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2752     switch (IID) {
2753     default:
2754       break;
2755     case Intrinsic::maxnum:
2756       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2757                                              Depth + 1) ||
2758              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2759                                              Depth + 1);
2760     case Intrinsic::minnum:
2761       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2762                                              Depth + 1) &&
2763              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2764                                              Depth + 1);
2765     case Intrinsic::exp:
2766     case Intrinsic::exp2:
2767     case Intrinsic::fabs:
2768       return true;
2769 
2770     case Intrinsic::sqrt:
2771       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
2772       if (!SignBitOnly)
2773         return true;
2774       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2775                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
2776 
2777     case Intrinsic::powi:
2778       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2779         // powi(x,n) is non-negative if n is even.
2780         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2781           return true;
2782       }
2783       // TODO: This is not correct.  Given that exp is an integer, here are the
2784       // ways that pow can return a negative value:
2785       //
2786       //   pow(x, exp)    --> negative if exp is odd and x is negative.
2787       //   pow(-0, exp)   --> -inf if exp is negative odd.
2788       //   pow(-0, exp)   --> -0 if exp is positive odd.
2789       //   pow(-inf, exp) --> -0 if exp is negative odd.
2790       //   pow(-inf, exp) --> -inf if exp is positive odd.
2791       //
2792       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2793       // but we must return false if x == -0.  Unfortunately we do not currently
2794       // have a way of expressing this constraint.  See details in
2795       // https://llvm.org/bugs/show_bug.cgi?id=31702.
2796       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2797                                              Depth + 1);
2798 
2799     case Intrinsic::fma:
2800     case Intrinsic::fmuladd:
2801       // x*x+y is non-negative if y is non-negative.
2802       return I->getOperand(0) == I->getOperand(1) &&
2803              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2804              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2805                                              Depth + 1);
2806     }
2807     break;
2808   }
2809   return false;
2810 }
2811 
2812 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2813                                        const TargetLibraryInfo *TLI) {
2814   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2815 }
2816 
2817 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2818   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2819 }
2820 
2821 bool llvm::isKnownNeverNaN(const Value *V) {
2822   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2823 
2824   // If we're told that NaNs won't happen, assume they won't.
2825   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2826     if (FPMathOp->hasNoNaNs())
2827       return true;
2828 
2829   // TODO: Handle instructions and potentially recurse like other 'isKnown'
2830   // functions. For example, the result of sitofp is never NaN.
2831 
2832   // Handle scalar constants.
2833   if (auto *CFP = dyn_cast<ConstantFP>(V))
2834     return !CFP->isNaN();
2835 
2836   // Bail out for constant expressions, but try to handle vector constants.
2837   if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2838     return false;
2839 
2840   // For vectors, verify that each element is not NaN.
2841   unsigned NumElts = V->getType()->getVectorNumElements();
2842   for (unsigned i = 0; i != NumElts; ++i) {
2843     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2844     if (!Elt)
2845       return false;
2846     if (isa<UndefValue>(Elt))
2847       continue;
2848     auto *CElt = dyn_cast<ConstantFP>(Elt);
2849     if (!CElt || CElt->isNaN())
2850       return false;
2851   }
2852   // All elements were confirmed not-NaN or undefined.
2853   return true;
2854 }
2855 
2856 /// If the specified value can be set by repeating the same byte in memory,
2857 /// return the i8 value that it is represented with.  This is
2858 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2859 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2860 /// byte store (e.g. i16 0x1234), return null.
2861 Value *llvm::isBytewiseValue(Value *V) {
2862   // All byte-wide stores are splatable, even of arbitrary variables.
2863   if (V->getType()->isIntegerTy(8)) return V;
2864 
2865   // Handle 'null' ConstantArrayZero etc.
2866   if (Constant *C = dyn_cast<Constant>(V))
2867     if (C->isNullValue())
2868       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2869 
2870   // Constant float and double values can be handled as integer values if the
2871   // corresponding integer value is "byteable".  An important case is 0.0.
2872   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2873     if (CFP->getType()->isFloatTy())
2874       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2875     if (CFP->getType()->isDoubleTy())
2876       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2877     // Don't handle long double formats, which have strange constraints.
2878   }
2879 
2880   // We can handle constant integers that are multiple of 8 bits.
2881   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2882     if (CI->getBitWidth() % 8 == 0) {
2883       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2884 
2885       if (!CI->getValue().isSplat(8))
2886         return nullptr;
2887       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2888     }
2889   }
2890 
2891   // A ConstantDataArray/Vector is splatable if all its members are equal and
2892   // also splatable.
2893   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2894     Value *Elt = CA->getElementAsConstant(0);
2895     Value *Val = isBytewiseValue(Elt);
2896     if (!Val)
2897       return nullptr;
2898 
2899     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2900       if (CA->getElementAsConstant(I) != Elt)
2901         return nullptr;
2902 
2903     return Val;
2904   }
2905 
2906   // Conceptually, we could handle things like:
2907   //   %a = zext i8 %X to i16
2908   //   %b = shl i16 %a, 8
2909   //   %c = or i16 %a, %b
2910   // but until there is an example that actually needs this, it doesn't seem
2911   // worth worrying about.
2912   return nullptr;
2913 }
2914 
2915 // This is the recursive version of BuildSubAggregate. It takes a few different
2916 // arguments. Idxs is the index within the nested struct From that we are
2917 // looking at now (which is of type IndexedType). IdxSkip is the number of
2918 // indices from Idxs that should be left out when inserting into the resulting
2919 // struct. To is the result struct built so far, new insertvalue instructions
2920 // build on that.
2921 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2922                                 SmallVectorImpl<unsigned> &Idxs,
2923                                 unsigned IdxSkip,
2924                                 Instruction *InsertBefore) {
2925   StructType *STy = dyn_cast<StructType>(IndexedType);
2926   if (STy) {
2927     // Save the original To argument so we can modify it
2928     Value *OrigTo = To;
2929     // General case, the type indexed by Idxs is a struct
2930     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2931       // Process each struct element recursively
2932       Idxs.push_back(i);
2933       Value *PrevTo = To;
2934       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2935                              InsertBefore);
2936       Idxs.pop_back();
2937       if (!To) {
2938         // Couldn't find any inserted value for this index? Cleanup
2939         while (PrevTo != OrigTo) {
2940           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2941           PrevTo = Del->getAggregateOperand();
2942           Del->eraseFromParent();
2943         }
2944         // Stop processing elements
2945         break;
2946       }
2947     }
2948     // If we successfully found a value for each of our subaggregates
2949     if (To)
2950       return To;
2951   }
2952   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2953   // the struct's elements had a value that was inserted directly. In the latter
2954   // case, perhaps we can't determine each of the subelements individually, but
2955   // we might be able to find the complete struct somewhere.
2956 
2957   // Find the value that is at that particular spot
2958   Value *V = FindInsertedValue(From, Idxs);
2959 
2960   if (!V)
2961     return nullptr;
2962 
2963   // Insert the value in the new (sub) aggregrate
2964   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2965                                  "tmp", InsertBefore);
2966 }
2967 
2968 // This helper takes a nested struct and extracts a part of it (which is again a
2969 // struct) into a new value. For example, given the struct:
2970 // { a, { b, { c, d }, e } }
2971 // and the indices "1, 1" this returns
2972 // { c, d }.
2973 //
2974 // It does this by inserting an insertvalue for each element in the resulting
2975 // struct, as opposed to just inserting a single struct. This will only work if
2976 // each of the elements of the substruct are known (ie, inserted into From by an
2977 // insertvalue instruction somewhere).
2978 //
2979 // All inserted insertvalue instructions are inserted before InsertBefore
2980 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2981                                 Instruction *InsertBefore) {
2982   assert(InsertBefore && "Must have someplace to insert!");
2983   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2984                                                              idx_range);
2985   Value *To = UndefValue::get(IndexedType);
2986   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2987   unsigned IdxSkip = Idxs.size();
2988 
2989   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2990 }
2991 
2992 /// Given an aggregrate and an sequence of indices, see if
2993 /// the scalar value indexed is already around as a register, for example if it
2994 /// were inserted directly into the aggregrate.
2995 ///
2996 /// If InsertBefore is not null, this function will duplicate (modified)
2997 /// insertvalues when a part of a nested struct is extracted.
2998 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2999                                Instruction *InsertBefore) {
3000   // Nothing to index? Just return V then (this is useful at the end of our
3001   // recursion).
3002   if (idx_range.empty())
3003     return V;
3004   // We have indices, so V should have an indexable type.
3005   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3006          "Not looking at a struct or array?");
3007   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3008          "Invalid indices for type?");
3009 
3010   if (Constant *C = dyn_cast<Constant>(V)) {
3011     C = C->getAggregateElement(idx_range[0]);
3012     if (!C) return nullptr;
3013     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3014   }
3015 
3016   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3017     // Loop the indices for the insertvalue instruction in parallel with the
3018     // requested indices
3019     const unsigned *req_idx = idx_range.begin();
3020     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3021          i != e; ++i, ++req_idx) {
3022       if (req_idx == idx_range.end()) {
3023         // We can't handle this without inserting insertvalues
3024         if (!InsertBefore)
3025           return nullptr;
3026 
3027         // The requested index identifies a part of a nested aggregate. Handle
3028         // this specially. For example,
3029         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3030         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3031         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3032         // This can be changed into
3033         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3034         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3035         // which allows the unused 0,0 element from the nested struct to be
3036         // removed.
3037         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3038                                  InsertBefore);
3039       }
3040 
3041       // This insert value inserts something else than what we are looking for.
3042       // See if the (aggregate) value inserted into has the value we are
3043       // looking for, then.
3044       if (*req_idx != *i)
3045         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3046                                  InsertBefore);
3047     }
3048     // If we end up here, the indices of the insertvalue match with those
3049     // requested (though possibly only partially). Now we recursively look at
3050     // the inserted value, passing any remaining indices.
3051     return FindInsertedValue(I->getInsertedValueOperand(),
3052                              makeArrayRef(req_idx, idx_range.end()),
3053                              InsertBefore);
3054   }
3055 
3056   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3057     // If we're extracting a value from an aggregate that was extracted from
3058     // something else, we can extract from that something else directly instead.
3059     // However, we will need to chain I's indices with the requested indices.
3060 
3061     // Calculate the number of indices required
3062     unsigned size = I->getNumIndices() + idx_range.size();
3063     // Allocate some space to put the new indices in
3064     SmallVector<unsigned, 5> Idxs;
3065     Idxs.reserve(size);
3066     // Add indices from the extract value instruction
3067     Idxs.append(I->idx_begin(), I->idx_end());
3068 
3069     // Add requested indices
3070     Idxs.append(idx_range.begin(), idx_range.end());
3071 
3072     assert(Idxs.size() == size
3073            && "Number of indices added not correct?");
3074 
3075     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3076   }
3077   // Otherwise, we don't know (such as, extracting from a function return value
3078   // or load instruction)
3079   return nullptr;
3080 }
3081 
3082 /// Analyze the specified pointer to see if it can be expressed as a base
3083 /// pointer plus a constant offset. Return the base and offset to the caller.
3084 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3085                                               const DataLayout &DL) {
3086   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
3087   APInt ByteOffset(BitWidth, 0);
3088 
3089   // We walk up the defs but use a visited set to handle unreachable code. In
3090   // that case, we stop after accumulating the cycle once (not that it
3091   // matters).
3092   SmallPtrSet<Value *, 16> Visited;
3093   while (Visited.insert(Ptr).second) {
3094     if (Ptr->getType()->isVectorTy())
3095       break;
3096 
3097     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3098       // If one of the values we have visited is an addrspacecast, then
3099       // the pointer type of this GEP may be different from the type
3100       // of the Ptr parameter which was passed to this function.  This
3101       // means when we construct GEPOffset, we need to use the size
3102       // of GEP's pointer type rather than the size of the original
3103       // pointer type.
3104       APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
3105       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3106         break;
3107 
3108       ByteOffset += GEPOffset.getSExtValue();
3109 
3110       Ptr = GEP->getPointerOperand();
3111     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3112                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3113       Ptr = cast<Operator>(Ptr)->getOperand(0);
3114     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3115       if (GA->isInterposable())
3116         break;
3117       Ptr = GA->getAliasee();
3118     } else {
3119       break;
3120     }
3121   }
3122   Offset = ByteOffset.getSExtValue();
3123   return Ptr;
3124 }
3125 
3126 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3127                                        unsigned CharSize) {
3128   // Make sure the GEP has exactly three arguments.
3129   if (GEP->getNumOperands() != 3)
3130     return false;
3131 
3132   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3133   // CharSize.
3134   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3135   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3136     return false;
3137 
3138   // Check to make sure that the first operand of the GEP is an integer and
3139   // has value 0 so that we are sure we're indexing into the initializer.
3140   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3141   if (!FirstIdx || !FirstIdx->isZero())
3142     return false;
3143 
3144   return true;
3145 }
3146 
3147 bool llvm::getConstantDataArrayInfo(const Value *V,
3148                                     ConstantDataArraySlice &Slice,
3149                                     unsigned ElementSize, uint64_t Offset) {
3150   assert(V);
3151 
3152   // Look through bitcast instructions and geps.
3153   V = V->stripPointerCasts();
3154 
3155   // If the value is a GEP instruction or constant expression, treat it as an
3156   // offset.
3157   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3158     // The GEP operator should be based on a pointer to string constant, and is
3159     // indexing into the string constant.
3160     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3161       return false;
3162 
3163     // If the second index isn't a ConstantInt, then this is a variable index
3164     // into the array.  If this occurs, we can't say anything meaningful about
3165     // the string.
3166     uint64_t StartIdx = 0;
3167     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3168       StartIdx = CI->getZExtValue();
3169     else
3170       return false;
3171     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3172                                     StartIdx + Offset);
3173   }
3174 
3175   // The GEP instruction, constant or instruction, must reference a global
3176   // variable that is a constant and is initialized. The referenced constant
3177   // initializer is the array that we'll use for optimization.
3178   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3179   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3180     return false;
3181 
3182   const ConstantDataArray *Array;
3183   ArrayType *ArrayTy;
3184   if (GV->getInitializer()->isNullValue()) {
3185     Type *GVTy = GV->getValueType();
3186     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3187       // A zeroinitializer for the array; there is no ConstantDataArray.
3188       Array = nullptr;
3189     } else {
3190       const DataLayout &DL = GV->getParent()->getDataLayout();
3191       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3192       uint64_t Length = SizeInBytes / (ElementSize / 8);
3193       if (Length <= Offset)
3194         return false;
3195 
3196       Slice.Array = nullptr;
3197       Slice.Offset = 0;
3198       Slice.Length = Length - Offset;
3199       return true;
3200     }
3201   } else {
3202     // This must be a ConstantDataArray.
3203     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3204     if (!Array)
3205       return false;
3206     ArrayTy = Array->getType();
3207   }
3208   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3209     return false;
3210 
3211   uint64_t NumElts = ArrayTy->getArrayNumElements();
3212   if (Offset > NumElts)
3213     return false;
3214 
3215   Slice.Array = Array;
3216   Slice.Offset = Offset;
3217   Slice.Length = NumElts - Offset;
3218   return true;
3219 }
3220 
3221 /// This function computes the length of a null-terminated C string pointed to
3222 /// by V. If successful, it returns true and returns the string in Str.
3223 /// If unsuccessful, it returns false.
3224 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3225                                  uint64_t Offset, bool TrimAtNul) {
3226   ConstantDataArraySlice Slice;
3227   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3228     return false;
3229 
3230   if (Slice.Array == nullptr) {
3231     if (TrimAtNul) {
3232       Str = StringRef();
3233       return true;
3234     }
3235     if (Slice.Length == 1) {
3236       Str = StringRef("", 1);
3237       return true;
3238     }
3239     // We cannot instantiate a StringRef as we do not have an appropriate string
3240     // of 0s at hand.
3241     return false;
3242   }
3243 
3244   // Start out with the entire array in the StringRef.
3245   Str = Slice.Array->getAsString();
3246   // Skip over 'offset' bytes.
3247   Str = Str.substr(Slice.Offset);
3248 
3249   if (TrimAtNul) {
3250     // Trim off the \0 and anything after it.  If the array is not nul
3251     // terminated, we just return the whole end of string.  The client may know
3252     // some other way that the string is length-bound.
3253     Str = Str.substr(0, Str.find('\0'));
3254   }
3255   return true;
3256 }
3257 
3258 // These next two are very similar to the above, but also look through PHI
3259 // nodes.
3260 // TODO: See if we can integrate these two together.
3261 
3262 /// If we can compute the length of the string pointed to by
3263 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3264 static uint64_t GetStringLengthH(const Value *V,
3265                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3266                                  unsigned CharSize) {
3267   // Look through noop bitcast instructions.
3268   V = V->stripPointerCasts();
3269 
3270   // If this is a PHI node, there are two cases: either we have already seen it
3271   // or we haven't.
3272   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3273     if (!PHIs.insert(PN).second)
3274       return ~0ULL;  // already in the set.
3275 
3276     // If it was new, see if all the input strings are the same length.
3277     uint64_t LenSoFar = ~0ULL;
3278     for (Value *IncValue : PN->incoming_values()) {
3279       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3280       if (Len == 0) return 0; // Unknown length -> unknown.
3281 
3282       if (Len == ~0ULL) continue;
3283 
3284       if (Len != LenSoFar && LenSoFar != ~0ULL)
3285         return 0;    // Disagree -> unknown.
3286       LenSoFar = Len;
3287     }
3288 
3289     // Success, all agree.
3290     return LenSoFar;
3291   }
3292 
3293   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3294   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3295     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3296     if (Len1 == 0) return 0;
3297     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3298     if (Len2 == 0) return 0;
3299     if (Len1 == ~0ULL) return Len2;
3300     if (Len2 == ~0ULL) return Len1;
3301     if (Len1 != Len2) return 0;
3302     return Len1;
3303   }
3304 
3305   // Otherwise, see if we can read the string.
3306   ConstantDataArraySlice Slice;
3307   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3308     return 0;
3309 
3310   if (Slice.Array == nullptr)
3311     return 1;
3312 
3313   // Search for nul characters
3314   unsigned NullIndex = 0;
3315   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3316     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3317       break;
3318   }
3319 
3320   return NullIndex + 1;
3321 }
3322 
3323 /// If we can compute the length of the string pointed to by
3324 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3325 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3326   if (!V->getType()->isPointerTy()) return 0;
3327 
3328   SmallPtrSet<const PHINode*, 32> PHIs;
3329   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3330   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3331   // an empty string as a length.
3332   return Len == ~0ULL ? 1 : Len;
3333 }
3334 
3335 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
3336 /// previous iteration of the loop was referring to the same object as \p PN.
3337 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3338                                          const LoopInfo *LI) {
3339   // Find the loop-defined value.
3340   Loop *L = LI->getLoopFor(PN->getParent());
3341   if (PN->getNumIncomingValues() != 2)
3342     return true;
3343 
3344   // Find the value from previous iteration.
3345   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3346   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3347     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3348   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3349     return true;
3350 
3351   // If a new pointer is loaded in the loop, the pointer references a different
3352   // object in every iteration.  E.g.:
3353   //    for (i)
3354   //       int *p = a[i];
3355   //       ...
3356   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3357     if (!L->isLoopInvariant(Load->getPointerOperand()))
3358       return false;
3359   return true;
3360 }
3361 
3362 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3363                                  unsigned MaxLookup) {
3364   if (!V->getType()->isPointerTy())
3365     return V;
3366   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3367     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3368       V = GEP->getPointerOperand();
3369     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3370                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3371       V = cast<Operator>(V)->getOperand(0);
3372     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3373       if (GA->isInterposable())
3374         return V;
3375       V = GA->getAliasee();
3376     } else if (isa<AllocaInst>(V)) {
3377       // An alloca can't be further simplified.
3378       return V;
3379     } else {
3380       if (auto CS = CallSite(V))
3381         if (Value *RV = CS.getReturnedArgOperand()) {
3382           V = RV;
3383           continue;
3384         }
3385 
3386       // See if InstructionSimplify knows any relevant tricks.
3387       if (Instruction *I = dyn_cast<Instruction>(V))
3388         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3389         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3390           V = Simplified;
3391           continue;
3392         }
3393 
3394       return V;
3395     }
3396     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3397   }
3398   return V;
3399 }
3400 
3401 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3402                                 const DataLayout &DL, LoopInfo *LI,
3403                                 unsigned MaxLookup) {
3404   SmallPtrSet<Value *, 4> Visited;
3405   SmallVector<Value *, 4> Worklist;
3406   Worklist.push_back(V);
3407   do {
3408     Value *P = Worklist.pop_back_val();
3409     P = GetUnderlyingObject(P, DL, MaxLookup);
3410 
3411     if (!Visited.insert(P).second)
3412       continue;
3413 
3414     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3415       Worklist.push_back(SI->getTrueValue());
3416       Worklist.push_back(SI->getFalseValue());
3417       continue;
3418     }
3419 
3420     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3421       // If this PHI changes the underlying object in every iteration of the
3422       // loop, don't look through it.  Consider:
3423       //   int **A;
3424       //   for (i) {
3425       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3426       //     Curr = A[i];
3427       //     *Prev, *Curr;
3428       //
3429       // Prev is tracking Curr one iteration behind so they refer to different
3430       // underlying objects.
3431       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3432           isSameUnderlyingObjectInLoop(PN, LI))
3433         for (Value *IncValue : PN->incoming_values())
3434           Worklist.push_back(IncValue);
3435       continue;
3436     }
3437 
3438     Objects.push_back(P);
3439   } while (!Worklist.empty());
3440 }
3441 
3442 /// This is the function that does the work of looking through basic
3443 /// ptrtoint+arithmetic+inttoptr sequences.
3444 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3445   do {
3446     if (const Operator *U = dyn_cast<Operator>(V)) {
3447       // If we find a ptrtoint, we can transfer control back to the
3448       // regular getUnderlyingObjectFromInt.
3449       if (U->getOpcode() == Instruction::PtrToInt)
3450         return U->getOperand(0);
3451       // If we find an add of a constant, a multiplied value, or a phi, it's
3452       // likely that the other operand will lead us to the base
3453       // object. We don't have to worry about the case where the
3454       // object address is somehow being computed by the multiply,
3455       // because our callers only care when the result is an
3456       // identifiable object.
3457       if (U->getOpcode() != Instruction::Add ||
3458           (!isa<ConstantInt>(U->getOperand(1)) &&
3459            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3460            !isa<PHINode>(U->getOperand(1))))
3461         return V;
3462       V = U->getOperand(0);
3463     } else {
3464       return V;
3465     }
3466     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3467   } while (true);
3468 }
3469 
3470 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3471 /// ptrtoint+arithmetic+inttoptr sequences.
3472 /// It returns false if unidentified object is found in GetUnderlyingObjects.
3473 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3474                           SmallVectorImpl<Value *> &Objects,
3475                           const DataLayout &DL) {
3476   SmallPtrSet<const Value *, 16> Visited;
3477   SmallVector<const Value *, 4> Working(1, V);
3478   do {
3479     V = Working.pop_back_val();
3480 
3481     SmallVector<Value *, 4> Objs;
3482     GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3483 
3484     for (Value *V : Objs) {
3485       if (!Visited.insert(V).second)
3486         continue;
3487       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3488         const Value *O =
3489           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3490         if (O->getType()->isPointerTy()) {
3491           Working.push_back(O);
3492           continue;
3493         }
3494       }
3495       // If GetUnderlyingObjects fails to find an identifiable object,
3496       // getUnderlyingObjectsForCodeGen also fails for safety.
3497       if (!isIdentifiedObject(V)) {
3498         Objects.clear();
3499         return false;
3500       }
3501       Objects.push_back(const_cast<Value *>(V));
3502     }
3503   } while (!Working.empty());
3504   return true;
3505 }
3506 
3507 /// Return true if the only users of this pointer are lifetime markers.
3508 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3509   for (const User *U : V->users()) {
3510     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3511     if (!II) return false;
3512 
3513     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3514         II->getIntrinsicID() != Intrinsic::lifetime_end)
3515       return false;
3516   }
3517   return true;
3518 }
3519 
3520 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3521                                         const Instruction *CtxI,
3522                                         const DominatorTree *DT) {
3523   const Operator *Inst = dyn_cast<Operator>(V);
3524   if (!Inst)
3525     return false;
3526 
3527   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3528     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3529       if (C->canTrap())
3530         return false;
3531 
3532   switch (Inst->getOpcode()) {
3533   default:
3534     return true;
3535   case Instruction::UDiv:
3536   case Instruction::URem: {
3537     // x / y is undefined if y == 0.
3538     const APInt *V;
3539     if (match(Inst->getOperand(1), m_APInt(V)))
3540       return *V != 0;
3541     return false;
3542   }
3543   case Instruction::SDiv:
3544   case Instruction::SRem: {
3545     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3546     const APInt *Numerator, *Denominator;
3547     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3548       return false;
3549     // We cannot hoist this division if the denominator is 0.
3550     if (*Denominator == 0)
3551       return false;
3552     // It's safe to hoist if the denominator is not 0 or -1.
3553     if (*Denominator != -1)
3554       return true;
3555     // At this point we know that the denominator is -1.  It is safe to hoist as
3556     // long we know that the numerator is not INT_MIN.
3557     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3558       return !Numerator->isMinSignedValue();
3559     // The numerator *might* be MinSignedValue.
3560     return false;
3561   }
3562   case Instruction::Load: {
3563     const LoadInst *LI = cast<LoadInst>(Inst);
3564     if (!LI->isUnordered() ||
3565         // Speculative load may create a race that did not exist in the source.
3566         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3567         // Speculative load may load data from dirty regions.
3568         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3569         LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3570       return false;
3571     const DataLayout &DL = LI->getModule()->getDataLayout();
3572     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3573                                               LI->getAlignment(), DL, CtxI, DT);
3574   }
3575   case Instruction::Call: {
3576     auto *CI = cast<const CallInst>(Inst);
3577     const Function *Callee = CI->getCalledFunction();
3578 
3579     // The called function could have undefined behavior or side-effects, even
3580     // if marked readnone nounwind.
3581     return Callee && Callee->isSpeculatable();
3582   }
3583   case Instruction::VAArg:
3584   case Instruction::Alloca:
3585   case Instruction::Invoke:
3586   case Instruction::PHI:
3587   case Instruction::Store:
3588   case Instruction::Ret:
3589   case Instruction::Br:
3590   case Instruction::IndirectBr:
3591   case Instruction::Switch:
3592   case Instruction::Unreachable:
3593   case Instruction::Fence:
3594   case Instruction::AtomicRMW:
3595   case Instruction::AtomicCmpXchg:
3596   case Instruction::LandingPad:
3597   case Instruction::Resume:
3598   case Instruction::CatchSwitch:
3599   case Instruction::CatchPad:
3600   case Instruction::CatchRet:
3601   case Instruction::CleanupPad:
3602   case Instruction::CleanupRet:
3603     return false; // Misc instructions which have effects
3604   }
3605 }
3606 
3607 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3608   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3609 }
3610 
3611 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3612                                                    const Value *RHS,
3613                                                    const DataLayout &DL,
3614                                                    AssumptionCache *AC,
3615                                                    const Instruction *CxtI,
3616                                                    const DominatorTree *DT) {
3617   // Multiplying n * m significant bits yields a result of n + m significant
3618   // bits. If the total number of significant bits does not exceed the
3619   // result bit width (minus 1), there is no overflow.
3620   // This means if we have enough leading zero bits in the operands
3621   // we can guarantee that the result does not overflow.
3622   // Ref: "Hacker's Delight" by Henry Warren
3623   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3624   KnownBits LHSKnown(BitWidth);
3625   KnownBits RHSKnown(BitWidth);
3626   computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3627   computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3628   // Note that underestimating the number of zero bits gives a more
3629   // conservative answer.
3630   unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3631                       RHSKnown.countMinLeadingZeros();
3632   // First handle the easy case: if we have enough zero bits there's
3633   // definitely no overflow.
3634   if (ZeroBits >= BitWidth)
3635     return OverflowResult::NeverOverflows;
3636 
3637   // Get the largest possible values for each operand.
3638   APInt LHSMax = ~LHSKnown.Zero;
3639   APInt RHSMax = ~RHSKnown.Zero;
3640 
3641   // We know the multiply operation doesn't overflow if the maximum values for
3642   // each operand will not overflow after we multiply them together.
3643   bool MaxOverflow;
3644   (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3645   if (!MaxOverflow)
3646     return OverflowResult::NeverOverflows;
3647 
3648   // We know it always overflows if multiplying the smallest possible values for
3649   // the operands also results in overflow.
3650   bool MinOverflow;
3651   (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3652   if (MinOverflow)
3653     return OverflowResult::AlwaysOverflows;
3654 
3655   return OverflowResult::MayOverflow;
3656 }
3657 
3658 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3659                                                    const Value *RHS,
3660                                                    const DataLayout &DL,
3661                                                    AssumptionCache *AC,
3662                                                    const Instruction *CxtI,
3663                                                    const DominatorTree *DT) {
3664   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3665   if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3666     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3667 
3668     if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3669       // The sign bit is set in both cases: this MUST overflow.
3670       // Create a simple add instruction, and insert it into the struct.
3671       return OverflowResult::AlwaysOverflows;
3672     }
3673 
3674     if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3675       // The sign bit is clear in both cases: this CANNOT overflow.
3676       // Create a simple add instruction, and insert it into the struct.
3677       return OverflowResult::NeverOverflows;
3678     }
3679   }
3680 
3681   return OverflowResult::MayOverflow;
3682 }
3683 
3684 /// \brief Return true if we can prove that adding the two values of the
3685 /// knownbits will not overflow.
3686 /// Otherwise return false.
3687 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3688                                     const KnownBits &RHSKnown) {
3689   // Addition of two 2's complement numbers having opposite signs will never
3690   // overflow.
3691   if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3692       (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3693     return true;
3694 
3695   // If either of the values is known to be non-negative, adding them can only
3696   // overflow if the second is also non-negative, so we can assume that.
3697   // Two non-negative numbers will only overflow if there is a carry to the
3698   // sign bit, so we can check if even when the values are as big as possible
3699   // there is no overflow to the sign bit.
3700   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3701     APInt MaxLHS = ~LHSKnown.Zero;
3702     MaxLHS.clearSignBit();
3703     APInt MaxRHS = ~RHSKnown.Zero;
3704     MaxRHS.clearSignBit();
3705     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3706     return Result.isSignBitClear();
3707   }
3708 
3709   // If either of the values is known to be negative, adding them can only
3710   // overflow if the second is also negative, so we can assume that.
3711   // Two negative number will only overflow if there is no carry to the sign
3712   // bit, so we can check if even when the values are as small as possible
3713   // there is overflow to the sign bit.
3714   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3715     APInt MinLHS = LHSKnown.One;
3716     MinLHS.clearSignBit();
3717     APInt MinRHS = RHSKnown.One;
3718     MinRHS.clearSignBit();
3719     APInt Result = std::move(MinLHS) + std::move(MinRHS);
3720     return Result.isSignBitSet();
3721   }
3722 
3723   // If we reached here it means that we know nothing about the sign bits.
3724   // In this case we can't know if there will be an overflow, since by
3725   // changing the sign bits any two values can be made to overflow.
3726   return false;
3727 }
3728 
3729 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3730                                                   const Value *RHS,
3731                                                   const AddOperator *Add,
3732                                                   const DataLayout &DL,
3733                                                   AssumptionCache *AC,
3734                                                   const Instruction *CxtI,
3735                                                   const DominatorTree *DT) {
3736   if (Add && Add->hasNoSignedWrap()) {
3737     return OverflowResult::NeverOverflows;
3738   }
3739 
3740   // If LHS and RHS each have at least two sign bits, the addition will look
3741   // like
3742   //
3743   // XX..... +
3744   // YY.....
3745   //
3746   // If the carry into the most significant position is 0, X and Y can't both
3747   // be 1 and therefore the carry out of the addition is also 0.
3748   //
3749   // If the carry into the most significant position is 1, X and Y can't both
3750   // be 0 and therefore the carry out of the addition is also 1.
3751   //
3752   // Since the carry into the most significant position is always equal to
3753   // the carry out of the addition, there is no signed overflow.
3754   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3755       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3756     return OverflowResult::NeverOverflows;
3757 
3758   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3759   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3760 
3761   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3762     return OverflowResult::NeverOverflows;
3763 
3764   // The remaining code needs Add to be available. Early returns if not so.
3765   if (!Add)
3766     return OverflowResult::MayOverflow;
3767 
3768   // If the sign of Add is the same as at least one of the operands, this add
3769   // CANNOT overflow. This is particularly useful when the sum is
3770   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3771   // operands.
3772   bool LHSOrRHSKnownNonNegative =
3773       (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3774   bool LHSOrRHSKnownNegative =
3775       (LHSKnown.isNegative() || RHSKnown.isNegative());
3776   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3777     KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3778     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3779         (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3780       return OverflowResult::NeverOverflows;
3781     }
3782   }
3783 
3784   return OverflowResult::MayOverflow;
3785 }
3786 
3787 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3788                                      const DominatorTree &DT) {
3789 #ifndef NDEBUG
3790   auto IID = II->getIntrinsicID();
3791   assert((IID == Intrinsic::sadd_with_overflow ||
3792           IID == Intrinsic::uadd_with_overflow ||
3793           IID == Intrinsic::ssub_with_overflow ||
3794           IID == Intrinsic::usub_with_overflow ||
3795           IID == Intrinsic::smul_with_overflow ||
3796           IID == Intrinsic::umul_with_overflow) &&
3797          "Not an overflow intrinsic!");
3798 #endif
3799 
3800   SmallVector<const BranchInst *, 2> GuardingBranches;
3801   SmallVector<const ExtractValueInst *, 2> Results;
3802 
3803   for (const User *U : II->users()) {
3804     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3805       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3806 
3807       if (EVI->getIndices()[0] == 0)
3808         Results.push_back(EVI);
3809       else {
3810         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3811 
3812         for (const auto *U : EVI->users())
3813           if (const auto *B = dyn_cast<BranchInst>(U)) {
3814             assert(B->isConditional() && "How else is it using an i1?");
3815             GuardingBranches.push_back(B);
3816           }
3817       }
3818     } else {
3819       // We are using the aggregate directly in a way we don't want to analyze
3820       // here (storing it to a global, say).
3821       return false;
3822     }
3823   }
3824 
3825   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3826     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3827     if (!NoWrapEdge.isSingleEdge())
3828       return false;
3829 
3830     // Check if all users of the add are provably no-wrap.
3831     for (const auto *Result : Results) {
3832       // If the extractvalue itself is not executed on overflow, the we don't
3833       // need to check each use separately, since domination is transitive.
3834       if (DT.dominates(NoWrapEdge, Result->getParent()))
3835         continue;
3836 
3837       for (auto &RU : Result->uses())
3838         if (!DT.dominates(NoWrapEdge, RU))
3839           return false;
3840     }
3841 
3842     return true;
3843   };
3844 
3845   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
3846 }
3847 
3848 
3849 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3850                                                  const DataLayout &DL,
3851                                                  AssumptionCache *AC,
3852                                                  const Instruction *CxtI,
3853                                                  const DominatorTree *DT) {
3854   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3855                                        Add, DL, AC, CxtI, DT);
3856 }
3857 
3858 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3859                                                  const Value *RHS,
3860                                                  const DataLayout &DL,
3861                                                  AssumptionCache *AC,
3862                                                  const Instruction *CxtI,
3863                                                  const DominatorTree *DT) {
3864   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3865 }
3866 
3867 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3868   // A memory operation returns normally if it isn't volatile. A volatile
3869   // operation is allowed to trap.
3870   //
3871   // An atomic operation isn't guaranteed to return in a reasonable amount of
3872   // time because it's possible for another thread to interfere with it for an
3873   // arbitrary length of time, but programs aren't allowed to rely on that.
3874   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3875     return !LI->isVolatile();
3876   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3877     return !SI->isVolatile();
3878   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3879     return !CXI->isVolatile();
3880   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3881     return !RMWI->isVolatile();
3882   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3883     return !MII->isVolatile();
3884 
3885   // If there is no successor, then execution can't transfer to it.
3886   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3887     return !CRI->unwindsToCaller();
3888   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3889     return !CatchSwitch->unwindsToCaller();
3890   if (isa<ResumeInst>(I))
3891     return false;
3892   if (isa<ReturnInst>(I))
3893     return false;
3894   if (isa<UnreachableInst>(I))
3895     return false;
3896 
3897   // Calls can throw, or contain an infinite loop, or kill the process.
3898   if (auto CS = ImmutableCallSite(I)) {
3899     // Call sites that throw have implicit non-local control flow.
3900     if (!CS.doesNotThrow())
3901       return false;
3902 
3903     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3904     // etc. and thus not return.  However, LLVM already assumes that
3905     //
3906     //  - Thread exiting actions are modeled as writes to memory invisible to
3907     //    the program.
3908     //
3909     //  - Loops that don't have side effects (side effects are volatile/atomic
3910     //    stores and IO) always terminate (see http://llvm.org/PR965).
3911     //    Furthermore IO itself is also modeled as writes to memory invisible to
3912     //    the program.
3913     //
3914     // We rely on those assumptions here, and use the memory effects of the call
3915     // target as a proxy for checking that it always returns.
3916 
3917     // FIXME: This isn't aggressive enough; a call which only writes to a global
3918     // is guaranteed to return.
3919     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3920            match(I, m_Intrinsic<Intrinsic::assume>()) ||
3921            match(I, m_Intrinsic<Intrinsic::sideeffect>());
3922   }
3923 
3924   // Other instructions return normally.
3925   return true;
3926 }
3927 
3928 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3929                                                   const Loop *L) {
3930   // The loop header is guaranteed to be executed for every iteration.
3931   //
3932   // FIXME: Relax this constraint to cover all basic blocks that are
3933   // guaranteed to be executed at every iteration.
3934   if (I->getParent() != L->getHeader()) return false;
3935 
3936   for (const Instruction &LI : *L->getHeader()) {
3937     if (&LI == I) return true;
3938     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3939   }
3940   llvm_unreachable("Instruction not contained in its own parent basic block.");
3941 }
3942 
3943 bool llvm::propagatesFullPoison(const Instruction *I) {
3944   switch (I->getOpcode()) {
3945   case Instruction::Add:
3946   case Instruction::Sub:
3947   case Instruction::Xor:
3948   case Instruction::Trunc:
3949   case Instruction::BitCast:
3950   case Instruction::AddrSpaceCast:
3951   case Instruction::Mul:
3952   case Instruction::Shl:
3953   case Instruction::GetElementPtr:
3954     // These operations all propagate poison unconditionally. Note that poison
3955     // is not any particular value, so xor or subtraction of poison with
3956     // itself still yields poison, not zero.
3957     return true;
3958 
3959   case Instruction::AShr:
3960   case Instruction::SExt:
3961     // For these operations, one bit of the input is replicated across
3962     // multiple output bits. A replicated poison bit is still poison.
3963     return true;
3964 
3965   case Instruction::ICmp:
3966     // Comparing poison with any value yields poison.  This is why, for
3967     // instance, x s< (x +nsw 1) can be folded to true.
3968     return true;
3969 
3970   default:
3971     return false;
3972   }
3973 }
3974 
3975 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3976   switch (I->getOpcode()) {
3977     case Instruction::Store:
3978       return cast<StoreInst>(I)->getPointerOperand();
3979 
3980     case Instruction::Load:
3981       return cast<LoadInst>(I)->getPointerOperand();
3982 
3983     case Instruction::AtomicCmpXchg:
3984       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3985 
3986     case Instruction::AtomicRMW:
3987       return cast<AtomicRMWInst>(I)->getPointerOperand();
3988 
3989     case Instruction::UDiv:
3990     case Instruction::SDiv:
3991     case Instruction::URem:
3992     case Instruction::SRem:
3993       return I->getOperand(1);
3994 
3995     default:
3996       return nullptr;
3997   }
3998 }
3999 
4000 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4001   // We currently only look for uses of poison values within the same basic
4002   // block, as that makes it easier to guarantee that the uses will be
4003   // executed given that PoisonI is executed.
4004   //
4005   // FIXME: Expand this to consider uses beyond the same basic block. To do
4006   // this, look out for the distinction between post-dominance and strong
4007   // post-dominance.
4008   const BasicBlock *BB = PoisonI->getParent();
4009 
4010   // Set of instructions that we have proved will yield poison if PoisonI
4011   // does.
4012   SmallSet<const Value *, 16> YieldsPoison;
4013   SmallSet<const BasicBlock *, 4> Visited;
4014   YieldsPoison.insert(PoisonI);
4015   Visited.insert(PoisonI->getParent());
4016 
4017   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4018 
4019   unsigned Iter = 0;
4020   while (Iter++ < MaxDepth) {
4021     for (auto &I : make_range(Begin, End)) {
4022       if (&I != PoisonI) {
4023         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4024         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4025           return true;
4026         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4027           return false;
4028       }
4029 
4030       // Mark poison that propagates from I through uses of I.
4031       if (YieldsPoison.count(&I)) {
4032         for (const User *User : I.users()) {
4033           const Instruction *UserI = cast<Instruction>(User);
4034           if (propagatesFullPoison(UserI))
4035             YieldsPoison.insert(User);
4036         }
4037       }
4038     }
4039 
4040     if (auto *NextBB = BB->getSingleSuccessor()) {
4041       if (Visited.insert(NextBB).second) {
4042         BB = NextBB;
4043         Begin = BB->getFirstNonPHI()->getIterator();
4044         End = BB->end();
4045         continue;
4046       }
4047     }
4048 
4049     break;
4050   }
4051   return false;
4052 }
4053 
4054 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4055   if (FMF.noNaNs())
4056     return true;
4057 
4058   if (auto *C = dyn_cast<ConstantFP>(V))
4059     return !C->isNaN();
4060   return false;
4061 }
4062 
4063 static bool isKnownNonZero(const Value *V) {
4064   if (auto *C = dyn_cast<ConstantFP>(V))
4065     return !C->isZero();
4066   return false;
4067 }
4068 
4069 /// Match clamp pattern for float types without care about NaNs or signed zeros.
4070 /// Given non-min/max outer cmp/select from the clamp pattern this
4071 /// function recognizes if it can be substitued by a "canonical" min/max
4072 /// pattern.
4073 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4074                                                Value *CmpLHS, Value *CmpRHS,
4075                                                Value *TrueVal, Value *FalseVal,
4076                                                Value *&LHS, Value *&RHS) {
4077   // Try to match
4078   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4079   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4080   // and return description of the outer Max/Min.
4081 
4082   // First, check if select has inverse order:
4083   if (CmpRHS == FalseVal) {
4084     std::swap(TrueVal, FalseVal);
4085     Pred = CmpInst::getInversePredicate(Pred);
4086   }
4087 
4088   // Assume success now. If there's no match, callers should not use these anyway.
4089   LHS = TrueVal;
4090   RHS = FalseVal;
4091 
4092   const APFloat *FC1;
4093   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4094     return {SPF_UNKNOWN, SPNB_NA, false};
4095 
4096   const APFloat *FC2;
4097   switch (Pred) {
4098   case CmpInst::FCMP_OLT:
4099   case CmpInst::FCMP_OLE:
4100   case CmpInst::FCMP_ULT:
4101   case CmpInst::FCMP_ULE:
4102     if (match(FalseVal,
4103               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4104                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4105         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4106       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4107     break;
4108   case CmpInst::FCMP_OGT:
4109   case CmpInst::FCMP_OGE:
4110   case CmpInst::FCMP_UGT:
4111   case CmpInst::FCMP_UGE:
4112     if (match(FalseVal,
4113               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4114                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4115         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4116       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4117     break;
4118   default:
4119     break;
4120   }
4121 
4122   return {SPF_UNKNOWN, SPNB_NA, false};
4123 }
4124 
4125 /// Recognize variations of:
4126 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4127 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4128                                       Value *CmpLHS, Value *CmpRHS,
4129                                       Value *TrueVal, Value *FalseVal) {
4130   // Swap the select operands and predicate to match the patterns below.
4131   if (CmpRHS != TrueVal) {
4132     Pred = ICmpInst::getSwappedPredicate(Pred);
4133     std::swap(TrueVal, FalseVal);
4134   }
4135   const APInt *C1;
4136   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4137     const APInt *C2;
4138     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4139     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4140         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4141       return {SPF_SMAX, SPNB_NA, false};
4142 
4143     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4144     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4145         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4146       return {SPF_SMIN, SPNB_NA, false};
4147 
4148     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4149     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4150         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4151       return {SPF_UMAX, SPNB_NA, false};
4152 
4153     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4154     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4155         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4156       return {SPF_UMIN, SPNB_NA, false};
4157   }
4158   return {SPF_UNKNOWN, SPNB_NA, false};
4159 }
4160 
4161 /// Recognize variations of:
4162 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4163 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4164                                                Value *CmpLHS, Value *CmpRHS,
4165                                                Value *TVal, Value *FVal,
4166                                                unsigned Depth) {
4167   // TODO: Allow FP min/max with nnan/nsz.
4168   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4169 
4170   Value *A, *B;
4171   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4172   if (!SelectPatternResult::isMinOrMax(L.Flavor))
4173     return {SPF_UNKNOWN, SPNB_NA, false};
4174 
4175   Value *C, *D;
4176   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4177   if (L.Flavor != R.Flavor)
4178     return {SPF_UNKNOWN, SPNB_NA, false};
4179 
4180   // We have something like: x Pred y ? min(a, b) : min(c, d).
4181   // Try to match the compare to the min/max operations of the select operands.
4182   // First, make sure we have the right compare predicate.
4183   switch (L.Flavor) {
4184   case SPF_SMIN:
4185     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4186       Pred = ICmpInst::getSwappedPredicate(Pred);
4187       std::swap(CmpLHS, CmpRHS);
4188     }
4189     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4190       break;
4191     return {SPF_UNKNOWN, SPNB_NA, false};
4192   case SPF_SMAX:
4193     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4194       Pred = ICmpInst::getSwappedPredicate(Pred);
4195       std::swap(CmpLHS, CmpRHS);
4196     }
4197     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4198       break;
4199     return {SPF_UNKNOWN, SPNB_NA, false};
4200   case SPF_UMIN:
4201     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4202       Pred = ICmpInst::getSwappedPredicate(Pred);
4203       std::swap(CmpLHS, CmpRHS);
4204     }
4205     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4206       break;
4207     return {SPF_UNKNOWN, SPNB_NA, false};
4208   case SPF_UMAX:
4209     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4210       Pred = ICmpInst::getSwappedPredicate(Pred);
4211       std::swap(CmpLHS, CmpRHS);
4212     }
4213     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4214       break;
4215     return {SPF_UNKNOWN, SPNB_NA, false};
4216   default:
4217     return {SPF_UNKNOWN, SPNB_NA, false};
4218   }
4219 
4220   // If there is a common operand in the already matched min/max and the other
4221   // min/max operands match the compare operands (either directly or inverted),
4222   // then this is min/max of the same flavor.
4223 
4224   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4225   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4226   if (D == B) {
4227     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4228                                          match(A, m_Not(m_Specific(CmpRHS)))))
4229       return {L.Flavor, SPNB_NA, false};
4230   }
4231   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4232   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4233   if (C == B) {
4234     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4235                                          match(A, m_Not(m_Specific(CmpRHS)))))
4236       return {L.Flavor, SPNB_NA, false};
4237   }
4238   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4239   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4240   if (D == A) {
4241     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4242                                          match(B, m_Not(m_Specific(CmpRHS)))))
4243       return {L.Flavor, SPNB_NA, false};
4244   }
4245   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4246   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4247   if (C == A) {
4248     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4249                                          match(B, m_Not(m_Specific(CmpRHS)))))
4250       return {L.Flavor, SPNB_NA, false};
4251   }
4252 
4253   return {SPF_UNKNOWN, SPNB_NA, false};
4254 }
4255 
4256 /// Match non-obvious integer minimum and maximum sequences.
4257 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4258                                        Value *CmpLHS, Value *CmpRHS,
4259                                        Value *TrueVal, Value *FalseVal,
4260                                        Value *&LHS, Value *&RHS,
4261                                        unsigned Depth) {
4262   // Assume success. If there's no match, callers should not use these anyway.
4263   LHS = TrueVal;
4264   RHS = FalseVal;
4265 
4266   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4267   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4268     return SPR;
4269 
4270   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4271   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4272     return SPR;
4273 
4274   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4275     return {SPF_UNKNOWN, SPNB_NA, false};
4276 
4277   // Z = X -nsw Y
4278   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4279   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4280   if (match(TrueVal, m_Zero()) &&
4281       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4282     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4283 
4284   // Z = X -nsw Y
4285   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4286   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4287   if (match(FalseVal, m_Zero()) &&
4288       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4289     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4290 
4291   const APInt *C1;
4292   if (!match(CmpRHS, m_APInt(C1)))
4293     return {SPF_UNKNOWN, SPNB_NA, false};
4294 
4295   // An unsigned min/max can be written with a signed compare.
4296   const APInt *C2;
4297   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4298       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4299     // Is the sign bit set?
4300     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4301     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4302     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4303         C2->isMaxSignedValue())
4304       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4305 
4306     // Is the sign bit clear?
4307     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4308     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4309     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4310         C2->isMinSignedValue())
4311       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4312   }
4313 
4314   // Look through 'not' ops to find disguised signed min/max.
4315   // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4316   // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4317   if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4318       match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4319     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4320 
4321   // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4322   // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4323   if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4324       match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4325     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4326 
4327   return {SPF_UNKNOWN, SPNB_NA, false};
4328 }
4329 
4330 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4331                                               FastMathFlags FMF,
4332                                               Value *CmpLHS, Value *CmpRHS,
4333                                               Value *TrueVal, Value *FalseVal,
4334                                               Value *&LHS, Value *&RHS,
4335                                               unsigned Depth) {
4336   LHS = CmpLHS;
4337   RHS = CmpRHS;
4338 
4339   // Signed zero may return inconsistent results between implementations.
4340   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4341   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4342   // Therefore, we behave conservatively and only proceed if at least one of the
4343   // operands is known to not be zero or if we don't care about signed zero.
4344   switch (Pred) {
4345   default: break;
4346   // FIXME: Include OGT/OLT/UGT/ULT.
4347   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4348   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4349     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4350         !isKnownNonZero(CmpRHS))
4351       return {SPF_UNKNOWN, SPNB_NA, false};
4352   }
4353 
4354   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4355   bool Ordered = false;
4356 
4357   // When given one NaN and one non-NaN input:
4358   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4359   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4360   //     ordered comparison fails), which could be NaN or non-NaN.
4361   // so here we discover exactly what NaN behavior is required/accepted.
4362   if (CmpInst::isFPPredicate(Pred)) {
4363     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4364     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4365 
4366     if (LHSSafe && RHSSafe) {
4367       // Both operands are known non-NaN.
4368       NaNBehavior = SPNB_RETURNS_ANY;
4369     } else if (CmpInst::isOrdered(Pred)) {
4370       // An ordered comparison will return false when given a NaN, so it
4371       // returns the RHS.
4372       Ordered = true;
4373       if (LHSSafe)
4374         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4375         NaNBehavior = SPNB_RETURNS_NAN;
4376       else if (RHSSafe)
4377         NaNBehavior = SPNB_RETURNS_OTHER;
4378       else
4379         // Completely unsafe.
4380         return {SPF_UNKNOWN, SPNB_NA, false};
4381     } else {
4382       Ordered = false;
4383       // An unordered comparison will return true when given a NaN, so it
4384       // returns the LHS.
4385       if (LHSSafe)
4386         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4387         NaNBehavior = SPNB_RETURNS_OTHER;
4388       else if (RHSSafe)
4389         NaNBehavior = SPNB_RETURNS_NAN;
4390       else
4391         // Completely unsafe.
4392         return {SPF_UNKNOWN, SPNB_NA, false};
4393     }
4394   }
4395 
4396   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4397     std::swap(CmpLHS, CmpRHS);
4398     Pred = CmpInst::getSwappedPredicate(Pred);
4399     if (NaNBehavior == SPNB_RETURNS_NAN)
4400       NaNBehavior = SPNB_RETURNS_OTHER;
4401     else if (NaNBehavior == SPNB_RETURNS_OTHER)
4402       NaNBehavior = SPNB_RETURNS_NAN;
4403     Ordered = !Ordered;
4404   }
4405 
4406   // ([if]cmp X, Y) ? X : Y
4407   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4408     switch (Pred) {
4409     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4410     case ICmpInst::ICMP_UGT:
4411     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4412     case ICmpInst::ICMP_SGT:
4413     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4414     case ICmpInst::ICMP_ULT:
4415     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4416     case ICmpInst::ICMP_SLT:
4417     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4418     case FCmpInst::FCMP_UGT:
4419     case FCmpInst::FCMP_UGE:
4420     case FCmpInst::FCMP_OGT:
4421     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4422     case FCmpInst::FCMP_ULT:
4423     case FCmpInst::FCMP_ULE:
4424     case FCmpInst::FCMP_OLT:
4425     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4426     }
4427   }
4428 
4429   const APInt *C1;
4430   if (match(CmpRHS, m_APInt(C1))) {
4431     if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4432         (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4433 
4434       // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4435       // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4436       if (Pred == ICmpInst::ICMP_SGT &&
4437           (C1->isNullValue() || C1->isAllOnesValue())) {
4438         return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4439       }
4440 
4441       // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4442       // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4443       if (Pred == ICmpInst::ICMP_SLT &&
4444           (C1->isNullValue() || C1->isOneValue())) {
4445         return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4446       }
4447     }
4448   }
4449 
4450   if (CmpInst::isIntPredicate(Pred))
4451     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4452 
4453   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4454   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
4455   // semantics than minNum. Be conservative in such case.
4456   if (NaNBehavior != SPNB_RETURNS_ANY ||
4457       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4458        !isKnownNonZero(CmpRHS)))
4459     return {SPF_UNKNOWN, SPNB_NA, false};
4460 
4461   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4462 }
4463 
4464 /// Helps to match a select pattern in case of a type mismatch.
4465 ///
4466 /// The function processes the case when type of true and false values of a
4467 /// select instruction differs from type of the cmp instruction operands because
4468 /// of a cast instructon. The function checks if it is legal to move the cast
4469 /// operation after "select". If yes, it returns the new second value of
4470 /// "select" (with the assumption that cast is moved):
4471 /// 1. As operand of cast instruction when both values of "select" are same cast
4472 /// instructions.
4473 /// 2. As restored constant (by applying reverse cast operation) when the first
4474 /// value of the "select" is a cast operation and the second value is a
4475 /// constant.
4476 /// NOTE: We return only the new second value because the first value could be
4477 /// accessed as operand of cast instruction.
4478 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4479                               Instruction::CastOps *CastOp) {
4480   auto *Cast1 = dyn_cast<CastInst>(V1);
4481   if (!Cast1)
4482     return nullptr;
4483 
4484   *CastOp = Cast1->getOpcode();
4485   Type *SrcTy = Cast1->getSrcTy();
4486   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4487     // If V1 and V2 are both the same cast from the same type, look through V1.
4488     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4489       return Cast2->getOperand(0);
4490     return nullptr;
4491   }
4492 
4493   auto *C = dyn_cast<Constant>(V2);
4494   if (!C)
4495     return nullptr;
4496 
4497   Constant *CastedTo = nullptr;
4498   switch (*CastOp) {
4499   case Instruction::ZExt:
4500     if (CmpI->isUnsigned())
4501       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4502     break;
4503   case Instruction::SExt:
4504     if (CmpI->isSigned())
4505       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4506     break;
4507   case Instruction::Trunc:
4508     Constant *CmpConst;
4509     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4510         CmpConst->getType() == SrcTy) {
4511       // Here we have the following case:
4512       //
4513       //   %cond = cmp iN %x, CmpConst
4514       //   %tr = trunc iN %x to iK
4515       //   %narrowsel = select i1 %cond, iK %t, iK C
4516       //
4517       // We can always move trunc after select operation:
4518       //
4519       //   %cond = cmp iN %x, CmpConst
4520       //   %widesel = select i1 %cond, iN %x, iN CmpConst
4521       //   %tr = trunc iN %widesel to iK
4522       //
4523       // Note that C could be extended in any way because we don't care about
4524       // upper bits after truncation. It can't be abs pattern, because it would
4525       // look like:
4526       //
4527       //   select i1 %cond, x, -x.
4528       //
4529       // So only min/max pattern could be matched. Such match requires widened C
4530       // == CmpConst. That is why set widened C = CmpConst, condition trunc
4531       // CmpConst == C is checked below.
4532       CastedTo = CmpConst;
4533     } else {
4534       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4535     }
4536     break;
4537   case Instruction::FPTrunc:
4538     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4539     break;
4540   case Instruction::FPExt:
4541     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4542     break;
4543   case Instruction::FPToUI:
4544     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4545     break;
4546   case Instruction::FPToSI:
4547     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4548     break;
4549   case Instruction::UIToFP:
4550     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4551     break;
4552   case Instruction::SIToFP:
4553     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4554     break;
4555   default:
4556     break;
4557   }
4558 
4559   if (!CastedTo)
4560     return nullptr;
4561 
4562   // Make sure the cast doesn't lose any information.
4563   Constant *CastedBack =
4564       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4565   if (CastedBack != C)
4566     return nullptr;
4567 
4568   return CastedTo;
4569 }
4570 
4571 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4572                                              Instruction::CastOps *CastOp,
4573                                              unsigned Depth) {
4574   if (Depth >= MaxDepth)
4575     return {SPF_UNKNOWN, SPNB_NA, false};
4576 
4577   SelectInst *SI = dyn_cast<SelectInst>(V);
4578   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4579 
4580   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4581   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4582 
4583   CmpInst::Predicate Pred = CmpI->getPredicate();
4584   Value *CmpLHS = CmpI->getOperand(0);
4585   Value *CmpRHS = CmpI->getOperand(1);
4586   Value *TrueVal = SI->getTrueValue();
4587   Value *FalseVal = SI->getFalseValue();
4588   FastMathFlags FMF;
4589   if (isa<FPMathOperator>(CmpI))
4590     FMF = CmpI->getFastMathFlags();
4591 
4592   // Bail out early.
4593   if (CmpI->isEquality())
4594     return {SPF_UNKNOWN, SPNB_NA, false};
4595 
4596   // Deal with type mismatches.
4597   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4598     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
4599       // If this is a potential fmin/fmax with a cast to integer, then ignore
4600       // -0.0 because there is no corresponding integer value.
4601       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4602         FMF.setNoSignedZeros();
4603       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4604                                   cast<CastInst>(TrueVal)->getOperand(0), C,
4605                                   LHS, RHS, Depth);
4606     }
4607     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
4608       // If this is a potential fmin/fmax with a cast to integer, then ignore
4609       // -0.0 because there is no corresponding integer value.
4610       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4611         FMF.setNoSignedZeros();
4612       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4613                                   C, cast<CastInst>(FalseVal)->getOperand(0),
4614                                   LHS, RHS, Depth);
4615     }
4616   }
4617   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4618                               LHS, RHS, Depth);
4619 }
4620 
4621 /// Return true if "icmp Pred LHS RHS" is always true.
4622 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
4623                             const Value *RHS, const DataLayout &DL,
4624                             unsigned Depth) {
4625   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4626   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4627     return true;
4628 
4629   switch (Pred) {
4630   default:
4631     return false;
4632 
4633   case CmpInst::ICMP_SLE: {
4634     const APInt *C;
4635 
4636     // LHS s<= LHS +_{nsw} C   if C >= 0
4637     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4638       return !C->isNegative();
4639     return false;
4640   }
4641 
4642   case CmpInst::ICMP_ULE: {
4643     const APInt *C;
4644 
4645     // LHS u<= LHS +_{nuw} C   for any C
4646     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4647       return true;
4648 
4649     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4650     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4651                                        const Value *&X,
4652                                        const APInt *&CA, const APInt *&CB) {
4653       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4654           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4655         return true;
4656 
4657       // If X & C == 0 then (X | C) == X +_{nuw} C
4658       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4659           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4660         KnownBits Known(CA->getBitWidth());
4661         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
4662                          /*CxtI*/ nullptr, /*DT*/ nullptr);
4663         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
4664           return true;
4665       }
4666 
4667       return false;
4668     };
4669 
4670     const Value *X;
4671     const APInt *CLHS, *CRHS;
4672     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4673       return CLHS->ule(*CRHS);
4674 
4675     return false;
4676   }
4677   }
4678 }
4679 
4680 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4681 /// ALHS ARHS" is true.  Otherwise, return None.
4682 static Optional<bool>
4683 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4684                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
4685                       const DataLayout &DL, unsigned Depth) {
4686   switch (Pred) {
4687   default:
4688     return None;
4689 
4690   case CmpInst::ICMP_SLT:
4691   case CmpInst::ICMP_SLE:
4692     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
4693         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
4694       return true;
4695     return None;
4696 
4697   case CmpInst::ICMP_ULT:
4698   case CmpInst::ICMP_ULE:
4699     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
4700         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
4701       return true;
4702     return None;
4703   }
4704 }
4705 
4706 /// Return true if the operands of the two compares match.  IsSwappedOps is true
4707 /// when the operands match, but are swapped.
4708 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4709                           const Value *BLHS, const Value *BRHS,
4710                           bool &IsSwappedOps) {
4711 
4712   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4713   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4714   return IsMatchingOps || IsSwappedOps;
4715 }
4716 
4717 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4718 /// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4719 /// BRHS" is false.  Otherwise, return None if we can't infer anything.
4720 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4721                                                     const Value *ALHS,
4722                                                     const Value *ARHS,
4723                                                     CmpInst::Predicate BPred,
4724                                                     const Value *BLHS,
4725                                                     const Value *BRHS,
4726                                                     bool IsSwappedOps) {
4727   // Canonicalize the operands so they're matching.
4728   if (IsSwappedOps) {
4729     std::swap(BLHS, BRHS);
4730     BPred = ICmpInst::getSwappedPredicate(BPred);
4731   }
4732   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4733     return true;
4734   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4735     return false;
4736 
4737   return None;
4738 }
4739 
4740 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4741 /// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4742 /// C2" is false.  Otherwise, return None if we can't infer anything.
4743 static Optional<bool>
4744 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4745                                  const ConstantInt *C1,
4746                                  CmpInst::Predicate BPred,
4747                                  const Value *BLHS, const ConstantInt *C2) {
4748   assert(ALHS == BLHS && "LHS operands must match.");
4749   ConstantRange DomCR =
4750       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4751   ConstantRange CR =
4752       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4753   ConstantRange Intersection = DomCR.intersectWith(CR);
4754   ConstantRange Difference = DomCR.difference(CR);
4755   if (Intersection.isEmptySet())
4756     return false;
4757   if (Difference.isEmptySet())
4758     return true;
4759   return None;
4760 }
4761 
4762 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
4763 /// false.  Otherwise, return None if we can't infer anything.
4764 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
4765                                          const ICmpInst *RHS,
4766                                          const DataLayout &DL, bool LHSIsTrue,
4767                                          unsigned Depth) {
4768   Value *ALHS = LHS->getOperand(0);
4769   Value *ARHS = LHS->getOperand(1);
4770   // The rest of the logic assumes the LHS condition is true.  If that's not the
4771   // case, invert the predicate to make it so.
4772   ICmpInst::Predicate APred =
4773       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
4774 
4775   Value *BLHS = RHS->getOperand(0);
4776   Value *BRHS = RHS->getOperand(1);
4777   ICmpInst::Predicate BPred = RHS->getPredicate();
4778 
4779   // Can we infer anything when the two compares have matching operands?
4780   bool IsSwappedOps;
4781   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4782     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4783             APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4784       return Implication;
4785     // No amount of additional analysis will infer the second condition, so
4786     // early exit.
4787     return None;
4788   }
4789 
4790   // Can we infer anything when the LHS operands match and the RHS operands are
4791   // constants (not necessarily matching)?
4792   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4793     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4794             APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4795             cast<ConstantInt>(BRHS)))
4796       return Implication;
4797     // No amount of additional analysis will infer the second condition, so
4798     // early exit.
4799     return None;
4800   }
4801 
4802   if (APred == BPred)
4803     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
4804   return None;
4805 }
4806 
4807 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
4808 /// false.  Otherwise, return None if we can't infer anything.  We expect the
4809 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
4810 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
4811                                          const ICmpInst *RHS,
4812                                          const DataLayout &DL, bool LHSIsTrue,
4813                                          unsigned Depth) {
4814   // The LHS must be an 'or' or an 'and' instruction.
4815   assert((LHS->getOpcode() == Instruction::And ||
4816           LHS->getOpcode() == Instruction::Or) &&
4817          "Expected LHS to be 'and' or 'or'.");
4818 
4819   assert(Depth <= MaxDepth && "Hit recursion limit");
4820 
4821   // If the result of an 'or' is false, then we know both legs of the 'or' are
4822   // false.  Similarly, if the result of an 'and' is true, then we know both
4823   // legs of the 'and' are true.
4824   Value *ALHS, *ARHS;
4825   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
4826       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
4827     // FIXME: Make this non-recursion.
4828     if (Optional<bool> Implication =
4829             isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
4830       return Implication;
4831     if (Optional<bool> Implication =
4832             isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
4833       return Implication;
4834     return None;
4835   }
4836   return None;
4837 }
4838 
4839 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4840                                         const DataLayout &DL, bool LHSIsTrue,
4841                                         unsigned Depth) {
4842   // Bail out when we hit the limit.
4843   if (Depth == MaxDepth)
4844     return None;
4845 
4846   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
4847   // example.
4848   if (LHS->getType() != RHS->getType())
4849     return None;
4850 
4851   Type *OpTy = LHS->getType();
4852   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
4853 
4854   // LHS ==> RHS by definition
4855   if (LHS == RHS)
4856     return LHSIsTrue;
4857 
4858   // FIXME: Extending the code below to handle vectors.
4859   if (OpTy->isVectorTy())
4860     return None;
4861 
4862   assert(OpTy->isIntegerTy(1) && "implied by above");
4863 
4864   // Both LHS and RHS are icmps.
4865   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
4866   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
4867   if (LHSCmp && RHSCmp)
4868     return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
4869 
4870   // The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to be
4871   // an icmp. FIXME: Add support for and/or on the RHS.
4872   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
4873   if (LHSBO && RHSCmp) {
4874     if ((LHSBO->getOpcode() == Instruction::And ||
4875          LHSBO->getOpcode() == Instruction::Or))
4876       return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
4877   }
4878   return None;
4879 }
4880