1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/MemoryBuiltins.h"
21 #include "llvm/Analysis/Loads.h"
22 #include "llvm/Analysis/LoopInfo.h"
23 #include "llvm/Analysis/OptimizationDiagnosticInfo.h"
24 #include "llvm/Analysis/VectorUtils.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/ConstantRange.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Dominators.h"
30 #include "llvm/IR/GetElementPtrTypeIterator.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/IR/LLVMContext.h"
36 #include "llvm/IR/Metadata.h"
37 #include "llvm/IR/Operator.h"
38 #include "llvm/IR/PatternMatch.h"
39 #include "llvm/IR/Statepoint.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Support/MathExtras.h"
42 #include <algorithm>
43 #include <array>
44 #include <cstring>
45 using namespace llvm;
46 using namespace llvm::PatternMatch;
47 
48 const unsigned MaxDepth = 6;
49 
50 // Controls the number of uses of the value searched for possible
51 // dominating comparisons.
52 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
53                                               cl::Hidden, cl::init(20));
54 
55 // This optimization is known to cause performance regressions is some cases,
56 // keep it under a temporary flag for now.
57 static cl::opt<bool>
58 DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
59                               cl::Hidden, cl::init(true));
60 
61 /// Returns the bitwidth of the given scalar or pointer type (if unknown returns
62 /// 0). For vector types, returns the element type's bitwidth.
63 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
64   if (unsigned BitWidth = Ty->getScalarSizeInBits())
65     return BitWidth;
66 
67   return DL.getPointerTypeSizeInBits(Ty);
68 }
69 
70 namespace {
71 // Simplifying using an assume can only be done in a particular control-flow
72 // context (the context instruction provides that context). If an assume and
73 // the context instruction are not in the same block then the DT helps in
74 // figuring out if we can use it.
75 struct Query {
76   const DataLayout &DL;
77   AssumptionCache *AC;
78   const Instruction *CxtI;
79   const DominatorTree *DT;
80   // Unlike the other analyses, this may be a nullptr because not all clients
81   // provide it currently.
82   OptimizationRemarkEmitter *ORE;
83 
84   /// Set of assumptions that should be excluded from further queries.
85   /// This is because of the potential for mutual recursion to cause
86   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
87   /// classic case of this is assume(x = y), which will attempt to determine
88   /// bits in x from bits in y, which will attempt to determine bits in y from
89   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
90   /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
91   /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
92   /// on.
93   std::array<const Value *, MaxDepth> Excluded;
94   unsigned NumExcluded;
95 
96   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
97         const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
98       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), NumExcluded(0) {}
99 
100   Query(const Query &Q, const Value *NewExcl)
101       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
102         NumExcluded(Q.NumExcluded) {
103     Excluded = Q.Excluded;
104     Excluded[NumExcluded++] = NewExcl;
105     assert(NumExcluded <= Excluded.size());
106   }
107 
108   bool isExcluded(const Value *Value) const {
109     if (NumExcluded == 0)
110       return false;
111     auto End = Excluded.begin() + NumExcluded;
112     return std::find(Excluded.begin(), End, Value) != End;
113   }
114 };
115 } // end anonymous namespace
116 
117 // Given the provided Value and, potentially, a context instruction, return
118 // the preferred context instruction (if any).
119 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
120   // If we've been provided with a context instruction, then use that (provided
121   // it has been inserted).
122   if (CxtI && CxtI->getParent())
123     return CxtI;
124 
125   // If the value is really an already-inserted instruction, then use that.
126   CxtI = dyn_cast<Instruction>(V);
127   if (CxtI && CxtI->getParent())
128     return CxtI;
129 
130   return nullptr;
131 }
132 
133 static void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
134                              unsigned Depth, const Query &Q);
135 
136 void llvm::computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
137                             const DataLayout &DL, unsigned Depth,
138                             AssumptionCache *AC, const Instruction *CxtI,
139                             const DominatorTree *DT,
140                             OptimizationRemarkEmitter *ORE) {
141   ::computeKnownBits(V, KnownZero, KnownOne, Depth,
142                      Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
143 }
144 
145 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
146                                const DataLayout &DL,
147                                AssumptionCache *AC, const Instruction *CxtI,
148                                const DominatorTree *DT) {
149   assert(LHS->getType() == RHS->getType() &&
150          "LHS and RHS should have the same type");
151   assert(LHS->getType()->isIntOrIntVectorTy() &&
152          "LHS and RHS should be integers");
153   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
154   APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
155   APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
156   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
157   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
158   return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
159 }
160 
161 static void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
162                            unsigned Depth, const Query &Q);
163 
164 void llvm::ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
165                           const DataLayout &DL, unsigned Depth,
166                           AssumptionCache *AC, const Instruction *CxtI,
167                           const DominatorTree *DT) {
168   ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
169                    Query(DL, AC, safeCxtI(V, CxtI), DT));
170 }
171 
172 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
173                                    const Query &Q);
174 
175 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
176                                   bool OrZero,
177                                   unsigned Depth, AssumptionCache *AC,
178                                   const Instruction *CxtI,
179                                   const DominatorTree *DT) {
180   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
181                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
182 }
183 
184 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
185 
186 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
187                           AssumptionCache *AC, const Instruction *CxtI,
188                           const DominatorTree *DT) {
189   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
190 }
191 
192 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
193                               unsigned Depth,
194                               AssumptionCache *AC, const Instruction *CxtI,
195                               const DominatorTree *DT) {
196   bool NonNegative, Negative;
197   ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
198   return NonNegative;
199 }
200 
201 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
202                            AssumptionCache *AC, const Instruction *CxtI,
203                            const DominatorTree *DT) {
204   if (auto *CI = dyn_cast<ConstantInt>(V))
205     return CI->getValue().isStrictlyPositive();
206 
207   // TODO: We'd doing two recursive queries here.  We should factor this such
208   // that only a single query is needed.
209   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
210     isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
211 }
212 
213 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
214                            AssumptionCache *AC, const Instruction *CxtI,
215                            const DominatorTree *DT) {
216   bool NonNegative, Negative;
217   ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
218   return Negative;
219 }
220 
221 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
222 
223 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
224                            const DataLayout &DL,
225                            AssumptionCache *AC, const Instruction *CxtI,
226                            const DominatorTree *DT) {
227   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
228                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
229                                          DT));
230 }
231 
232 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
233                               const Query &Q);
234 
235 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
236                              const DataLayout &DL,
237                              unsigned Depth, AssumptionCache *AC,
238                              const Instruction *CxtI, const DominatorTree *DT) {
239   return ::MaskedValueIsZero(V, Mask, Depth,
240                              Query(DL, AC, safeCxtI(V, CxtI), DT));
241 }
242 
243 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
244                                    const Query &Q);
245 
246 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
247                                   unsigned Depth, AssumptionCache *AC,
248                                   const Instruction *CxtI,
249                                   const DominatorTree *DT) {
250   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
251 }
252 
253 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
254                                    bool NSW,
255                                    APInt &KnownZero, APInt &KnownOne,
256                                    APInt &KnownZero2, APInt &KnownOne2,
257                                    unsigned Depth, const Query &Q) {
258   unsigned BitWidth = KnownZero.getBitWidth();
259 
260   // If an initial sequence of bits in the result is not needed, the
261   // corresponding bits in the operands are not needed.
262   APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
263   computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
264   computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);
265 
266   // Carry in a 1 for a subtract, rather than a 0.
267   uint64_t CarryIn = 0;
268   if (!Add) {
269     // Sum = LHS + ~RHS + 1
270     std::swap(KnownZero2, KnownOne2);
271     CarryIn = 1;
272   }
273 
274   APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
275   APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
276 
277   // Compute known bits of the carry.
278   APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
279   APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;
280 
281   // Compute set of known bits (where all three relevant bits are known).
282   APInt LHSKnown = LHSKnownZero | LHSKnownOne;
283   APInt RHSKnown = KnownZero2 | KnownOne2;
284   APInt CarryKnown = CarryKnownZero | CarryKnownOne;
285   APInt Known = LHSKnown & RHSKnown & CarryKnown;
286 
287   assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
288          "known bits of sum differ");
289 
290   // Compute known bits of the result.
291   KnownZero = ~PossibleSumOne & Known;
292   KnownOne = PossibleSumOne & Known;
293 
294   // Are we still trying to solve for the sign bit?
295   if (!Known.isSignBitSet()) {
296     if (NSW) {
297       // Adding two non-negative numbers, or subtracting a negative number from
298       // a non-negative one, can't wrap into negative.
299       if (LHSKnownZero.isSignBitSet() && KnownZero2.isSignBitSet())
300         KnownZero.setSignBit();
301       // Adding two negative numbers, or subtracting a non-negative number from
302       // a negative one, can't wrap into non-negative.
303       else if (LHSKnownOne.isSignBitSet() && KnownOne2.isSignBitSet())
304         KnownOne.setSignBit();
305     }
306   }
307 }
308 
309 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
310                                 APInt &KnownZero, APInt &KnownOne,
311                                 APInt &KnownZero2, APInt &KnownOne2,
312                                 unsigned Depth, const Query &Q) {
313   unsigned BitWidth = KnownZero.getBitWidth();
314   computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
315   computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);
316 
317   bool isKnownNegative = false;
318   bool isKnownNonNegative = false;
319   // If the multiplication is known not to overflow, compute the sign bit.
320   if (NSW) {
321     if (Op0 == Op1) {
322       // The product of a number with itself is non-negative.
323       isKnownNonNegative = true;
324     } else {
325       bool isKnownNonNegativeOp1 = KnownZero.isSignBitSet();
326       bool isKnownNonNegativeOp0 = KnownZero2.isSignBitSet();
327       bool isKnownNegativeOp1 = KnownOne.isSignBitSet();
328       bool isKnownNegativeOp0 = KnownOne2.isSignBitSet();
329       // The product of two numbers with the same sign is non-negative.
330       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
331         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
332       // The product of a negative number and a non-negative number is either
333       // negative or zero.
334       if (!isKnownNonNegative)
335         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
336                            isKnownNonZero(Op0, Depth, Q)) ||
337                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
338                            isKnownNonZero(Op1, Depth, Q));
339     }
340   }
341 
342   // If low bits are zero in either operand, output low known-0 bits.
343   // Also compute a conservative estimate for high known-0 bits.
344   // More trickiness is possible, but this is sufficient for the
345   // interesting case of alignment computation.
346   KnownOne.clearAllBits();
347   unsigned TrailZ = KnownZero.countTrailingOnes() +
348                     KnownZero2.countTrailingOnes();
349   unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
350                              KnownZero2.countLeadingOnes(),
351                              BitWidth) - BitWidth;
352 
353   TrailZ = std::min(TrailZ, BitWidth);
354   LeadZ = std::min(LeadZ, BitWidth);
355   KnownZero.clearAllBits();
356   KnownZero.setLowBits(TrailZ);
357   KnownZero.setHighBits(LeadZ);
358 
359   // Only make use of no-wrap flags if we failed to compute the sign bit
360   // directly.  This matters if the multiplication always overflows, in
361   // which case we prefer to follow the result of the direct computation,
362   // though as the program is invoking undefined behaviour we can choose
363   // whatever we like here.
364   if (isKnownNonNegative && !KnownOne.isSignBitSet())
365     KnownZero.setSignBit();
366   else if (isKnownNegative && !KnownZero.isSignBitSet())
367     KnownOne.setSignBit();
368 }
369 
370 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
371                                              APInt &KnownZero,
372                                              APInt &KnownOne) {
373   unsigned BitWidth = KnownZero.getBitWidth();
374   unsigned NumRanges = Ranges.getNumOperands() / 2;
375   assert(NumRanges >= 1);
376 
377   KnownZero.setAllBits();
378   KnownOne.setAllBits();
379 
380   for (unsigned i = 0; i < NumRanges; ++i) {
381     ConstantInt *Lower =
382         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
383     ConstantInt *Upper =
384         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
385     ConstantRange Range(Lower->getValue(), Upper->getValue());
386 
387     // The first CommonPrefixBits of all values in Range are equal.
388     unsigned CommonPrefixBits =
389         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
390 
391     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
392     KnownOne &= Range.getUnsignedMax() & Mask;
393     KnownZero &= ~Range.getUnsignedMax() & Mask;
394   }
395 }
396 
397 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
398   SmallVector<const Value *, 16> WorkSet(1, I);
399   SmallPtrSet<const Value *, 32> Visited;
400   SmallPtrSet<const Value *, 16> EphValues;
401 
402   // The instruction defining an assumption's condition itself is always
403   // considered ephemeral to that assumption (even if it has other
404   // non-ephemeral users). See r246696's test case for an example.
405   if (is_contained(I->operands(), E))
406     return true;
407 
408   while (!WorkSet.empty()) {
409     const Value *V = WorkSet.pop_back_val();
410     if (!Visited.insert(V).second)
411       continue;
412 
413     // If all uses of this value are ephemeral, then so is this value.
414     if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
415       if (V == E)
416         return true;
417 
418       EphValues.insert(V);
419       if (const User *U = dyn_cast<User>(V))
420         for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
421              J != JE; ++J) {
422           if (isSafeToSpeculativelyExecute(*J))
423             WorkSet.push_back(*J);
424         }
425     }
426   }
427 
428   return false;
429 }
430 
431 // Is this an intrinsic that cannot be speculated but also cannot trap?
432 static bool isAssumeLikeIntrinsic(const Instruction *I) {
433   if (const CallInst *CI = dyn_cast<CallInst>(I))
434     if (Function *F = CI->getCalledFunction())
435       switch (F->getIntrinsicID()) {
436       default: break;
437       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
438       case Intrinsic::assume:
439       case Intrinsic::dbg_declare:
440       case Intrinsic::dbg_value:
441       case Intrinsic::invariant_start:
442       case Intrinsic::invariant_end:
443       case Intrinsic::lifetime_start:
444       case Intrinsic::lifetime_end:
445       case Intrinsic::objectsize:
446       case Intrinsic::ptr_annotation:
447       case Intrinsic::var_annotation:
448         return true;
449       }
450 
451   return false;
452 }
453 
454 bool llvm::isValidAssumeForContext(const Instruction *Inv,
455                                    const Instruction *CxtI,
456                                    const DominatorTree *DT) {
457 
458   // There are two restrictions on the use of an assume:
459   //  1. The assume must dominate the context (or the control flow must
460   //     reach the assume whenever it reaches the context).
461   //  2. The context must not be in the assume's set of ephemeral values
462   //     (otherwise we will use the assume to prove that the condition
463   //     feeding the assume is trivially true, thus causing the removal of
464   //     the assume).
465 
466   if (DT) {
467     if (DT->dominates(Inv, CxtI))
468       return true;
469   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
470     // We don't have a DT, but this trivially dominates.
471     return true;
472   }
473 
474   // With or without a DT, the only remaining case we will check is if the
475   // instructions are in the same BB.  Give up if that is not the case.
476   if (Inv->getParent() != CxtI->getParent())
477     return false;
478 
479   // If we have a dom tree, then we now know that the assume doens't dominate
480   // the other instruction.  If we don't have a dom tree then we can check if
481   // the assume is first in the BB.
482   if (!DT) {
483     // Search forward from the assume until we reach the context (or the end
484     // of the block); the common case is that the assume will come first.
485     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
486          IE = Inv->getParent()->end(); I != IE; ++I)
487       if (&*I == CxtI)
488         return true;
489   }
490 
491   // The context comes first, but they're both in the same block. Make sure
492   // there is nothing in between that might interrupt the control flow.
493   for (BasicBlock::const_iterator I =
494          std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
495        I != IE; ++I)
496     if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
497       return false;
498 
499   return !isEphemeralValueOf(Inv, CxtI);
500 }
501 
502 static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
503                                        APInt &KnownOne, unsigned Depth,
504                                        const Query &Q) {
505   // Use of assumptions is context-sensitive. If we don't have a context, we
506   // cannot use them!
507   if (!Q.AC || !Q.CxtI)
508     return;
509 
510   unsigned BitWidth = KnownZero.getBitWidth();
511 
512   // Note that the patterns below need to be kept in sync with the code
513   // in AssumptionCache::updateAffectedValues.
514 
515   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
516     if (!AssumeVH)
517       continue;
518     CallInst *I = cast<CallInst>(AssumeVH);
519     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
520            "Got assumption for the wrong function!");
521     if (Q.isExcluded(I))
522       continue;
523 
524     // Warning: This loop can end up being somewhat performance sensetive.
525     // We're running this loop for once for each value queried resulting in a
526     // runtime of ~O(#assumes * #values).
527 
528     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
529            "must be an assume intrinsic");
530 
531     Value *Arg = I->getArgOperand(0);
532 
533     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
534       assert(BitWidth == 1 && "assume operand is not i1?");
535       KnownZero.clearAllBits();
536       KnownOne.setAllBits();
537       return;
538     }
539     if (match(Arg, m_Not(m_Specific(V))) &&
540         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
541       assert(BitWidth == 1 && "assume operand is not i1?");
542       KnownZero.setAllBits();
543       KnownOne.clearAllBits();
544       return;
545     }
546 
547     // The remaining tests are all recursive, so bail out if we hit the limit.
548     if (Depth == MaxDepth)
549       continue;
550 
551     Value *A, *B;
552     auto m_V = m_CombineOr(m_Specific(V),
553                            m_CombineOr(m_PtrToInt(m_Specific(V)),
554                            m_BitCast(m_Specific(V))));
555 
556     CmpInst::Predicate Pred;
557     ConstantInt *C;
558     // assume(v = a)
559     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
560         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
561       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
562       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
563       KnownZero |= RHSKnownZero;
564       KnownOne  |= RHSKnownOne;
565     // assume(v & b = a)
566     } else if (match(Arg,
567                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
568                Pred == ICmpInst::ICMP_EQ &&
569                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
570       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
571       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
572       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
573       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
574 
575       // For those bits in the mask that are known to be one, we can propagate
576       // known bits from the RHS to V.
577       KnownZero |= RHSKnownZero & MaskKnownOne;
578       KnownOne  |= RHSKnownOne  & MaskKnownOne;
579     // assume(~(v & b) = a)
580     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
581                                    m_Value(A))) &&
582                Pred == ICmpInst::ICMP_EQ &&
583                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
584       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
585       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
586       APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
587       computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));
588 
589       // For those bits in the mask that are known to be one, we can propagate
590       // inverted known bits from the RHS to V.
591       KnownZero |= RHSKnownOne  & MaskKnownOne;
592       KnownOne  |= RHSKnownZero & MaskKnownOne;
593     // assume(v | b = a)
594     } else if (match(Arg,
595                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
596                Pred == ICmpInst::ICMP_EQ &&
597                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
598       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
599       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
600       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
601       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
602 
603       // For those bits in B that are known to be zero, we can propagate known
604       // bits from the RHS to V.
605       KnownZero |= RHSKnownZero & BKnownZero;
606       KnownOne  |= RHSKnownOne  & BKnownZero;
607     // assume(~(v | b) = a)
608     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
609                                    m_Value(A))) &&
610                Pred == ICmpInst::ICMP_EQ &&
611                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
612       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
613       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
614       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
615       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
616 
617       // For those bits in B that are known to be zero, we can propagate
618       // inverted known bits from the RHS to V.
619       KnownZero |= RHSKnownOne  & BKnownZero;
620       KnownOne  |= RHSKnownZero & BKnownZero;
621     // assume(v ^ b = a)
622     } else if (match(Arg,
623                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
624                Pred == ICmpInst::ICMP_EQ &&
625                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
626       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
627       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
628       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
629       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
630 
631       // For those bits in B that are known to be zero, we can propagate known
632       // bits from the RHS to V. For those bits in B that are known to be one,
633       // we can propagate inverted known bits from the RHS to V.
634       KnownZero |= RHSKnownZero & BKnownZero;
635       KnownOne  |= RHSKnownOne  & BKnownZero;
636       KnownZero |= RHSKnownOne  & BKnownOne;
637       KnownOne  |= RHSKnownZero & BKnownOne;
638     // assume(~(v ^ b) = a)
639     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
640                                    m_Value(A))) &&
641                Pred == ICmpInst::ICMP_EQ &&
642                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
643       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
644       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
645       APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
646       computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));
647 
648       // For those bits in B that are known to be zero, we can propagate
649       // inverted known bits from the RHS to V. For those bits in B that are
650       // known to be one, we can propagate known bits from the RHS to V.
651       KnownZero |= RHSKnownOne  & BKnownZero;
652       KnownOne  |= RHSKnownZero & BKnownZero;
653       KnownZero |= RHSKnownZero & BKnownOne;
654       KnownOne  |= RHSKnownOne  & BKnownOne;
655     // assume(v << c = a)
656     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
657                                    m_Value(A))) &&
658                Pred == ICmpInst::ICMP_EQ &&
659                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
660       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
661       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
662       // For those bits in RHS that are known, we can propagate them to known
663       // bits in V shifted to the right by C.
664       RHSKnownZero.lshrInPlace(C->getZExtValue());
665       KnownZero |= RHSKnownZero;
666       RHSKnownOne.lshrInPlace(C->getZExtValue());
667       KnownOne  |= RHSKnownOne;
668     // assume(~(v << c) = a)
669     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
670                                    m_Value(A))) &&
671                Pred == ICmpInst::ICMP_EQ &&
672                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
673       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
674       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
675       // For those bits in RHS that are known, we can propagate them inverted
676       // to known bits in V shifted to the right by C.
677       RHSKnownOne.lshrInPlace(C->getZExtValue());
678       KnownZero |= RHSKnownOne;
679       RHSKnownZero.lshrInPlace(C->getZExtValue());
680       KnownOne  |= RHSKnownZero;
681     // assume(v >> c = a)
682     } else if (match(Arg,
683                      m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
684                                                 m_AShr(m_V, m_ConstantInt(C))),
685                               m_Value(A))) &&
686                Pred == ICmpInst::ICMP_EQ &&
687                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
688       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
689       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
690       // For those bits in RHS that are known, we can propagate them to known
691       // bits in V shifted to the right by C.
692       KnownZero |= RHSKnownZero << C->getZExtValue();
693       KnownOne  |= RHSKnownOne  << C->getZExtValue();
694     // assume(~(v >> c) = a)
695     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
696                                              m_LShr(m_V, m_ConstantInt(C)),
697                                              m_AShr(m_V, m_ConstantInt(C)))),
698                                    m_Value(A))) &&
699                Pred == ICmpInst::ICMP_EQ &&
700                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
701       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
702       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
703       // For those bits in RHS that are known, we can propagate them inverted
704       // to known bits in V shifted to the right by C.
705       KnownZero |= RHSKnownOne  << C->getZExtValue();
706       KnownOne  |= RHSKnownZero << C->getZExtValue();
707     // assume(v >=_s c) where c is non-negative
708     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
709                Pred == ICmpInst::ICMP_SGE &&
710                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
711       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
712       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
713 
714       if (RHSKnownZero.isSignBitSet()) {
715         // We know that the sign bit is zero.
716         KnownZero.setSignBit();
717       }
718     // assume(v >_s c) where c is at least -1.
719     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
720                Pred == ICmpInst::ICMP_SGT &&
721                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
722       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
723       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
724 
725       if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isSignBitSet()) {
726         // We know that the sign bit is zero.
727         KnownZero.setSignBit();
728       }
729     // assume(v <=_s c) where c is negative
730     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
731                Pred == ICmpInst::ICMP_SLE &&
732                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
733       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
734       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
735 
736       if (RHSKnownOne.isSignBitSet()) {
737         // We know that the sign bit is one.
738         KnownOne.setSignBit();
739       }
740     // assume(v <_s c) where c is non-positive
741     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
742                Pred == ICmpInst::ICMP_SLT &&
743                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
744       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
745       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
746 
747       if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isSignBitSet()) {
748         // We know that the sign bit is one.
749         KnownOne.setSignBit();
750       }
751     // assume(v <=_u c)
752     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
753                Pred == ICmpInst::ICMP_ULE &&
754                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
755       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
756       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
757 
758       // Whatever high bits in c are zero are known to be zero.
759       KnownZero.setHighBits(RHSKnownZero.countLeadingOnes());
760     // assume(v <_u c)
761     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
762                Pred == ICmpInst::ICMP_ULT &&
763                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
764       APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
765       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
766 
767       // Whatever high bits in c are zero are known to be zero (if c is a power
768       // of 2, then one more).
769       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
770         KnownZero.setHighBits(RHSKnownZero.countLeadingOnes()+1);
771       else
772         KnownZero.setHighBits(RHSKnownZero.countLeadingOnes());
773     }
774   }
775 
776   // If assumptions conflict with each other or previous known bits, then we
777   // have a logical fallacy. It's possible that the assumption is not reachable,
778   // so this isn't a real bug. On the other hand, the program may have undefined
779   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
780   // clear out the known bits, try to warn the user, and hope for the best.
781   if (KnownZero.intersects(KnownOne)) {
782     KnownZero.clearAllBits();
783     KnownOne.clearAllBits();
784 
785     if (Q.ORE) {
786       auto *CxtI = const_cast<Instruction *>(Q.CxtI);
787       OptimizationRemarkAnalysis ORA("value-tracking", "BadAssumption", CxtI);
788       Q.ORE->emit(ORA << "Detected conflicting code assumptions. Program may "
789                          "have undefined behavior, or compiler may have "
790                          "internal error.");
791     }
792   }
793 }
794 
795 // Compute known bits from a shift operator, including those with a
796 // non-constant shift amount. KnownZero and KnownOne are the outputs of this
797 // function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
798 // same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
799 // functors that, given the known-zero or known-one bits respectively, and a
800 // shift amount, compute the implied known-zero or known-one bits of the shift
801 // operator's result respectively for that shift amount. The results from calling
802 // KZF and KOF are conservatively combined for all permitted shift amounts.
803 static void computeKnownBitsFromShiftOperator(
804     const Operator *I, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2,
805     APInt &KnownOne2, unsigned Depth, const Query &Q,
806     function_ref<APInt(const APInt &, unsigned)> KZF,
807     function_ref<APInt(const APInt &, unsigned)> KOF) {
808   unsigned BitWidth = KnownZero.getBitWidth();
809 
810   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
811     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
812 
813     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
814     KnownZero = KZF(KnownZero, ShiftAmt);
815     KnownOne  = KOF(KnownOne, ShiftAmt);
816     // If there is conflict between KnownZero and KnownOne, this must be an
817     // overflowing left shift, so the shift result is undefined. Clear KnownZero
818     // and KnownOne bits so that other code could propagate this undef.
819     if ((KnownZero & KnownOne) != 0) {
820       KnownZero.clearAllBits();
821       KnownOne.clearAllBits();
822     }
823 
824     return;
825   }
826 
827   computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
828 
829   // If the shift amount could be greater than or equal to the bit-width of the LHS, the
830   // value could be undef, so we don't know anything about it.
831   if ((~KnownZero).uge(BitWidth)) {
832     KnownZero.clearAllBits();
833     KnownOne.clearAllBits();
834     return;
835   }
836 
837   // Note: We cannot use KnownZero.getLimitedValue() here, because if
838   // BitWidth > 64 and any upper bits are known, we'll end up returning the
839   // limit value (which implies all bits are known).
840   uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
841   uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();
842 
843   // It would be more-clearly correct to use the two temporaries for this
844   // calculation. Reusing the APInts here to prevent unnecessary allocations.
845   KnownZero.clearAllBits();
846   KnownOne.clearAllBits();
847 
848   // If we know the shifter operand is nonzero, we can sometimes infer more
849   // known bits. However this is expensive to compute, so be lazy about it and
850   // only compute it when absolutely necessary.
851   Optional<bool> ShifterOperandIsNonZero;
852 
853   // Early exit if we can't constrain any well-defined shift amount.
854   if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
855     ShifterOperandIsNonZero =
856         isKnownNonZero(I->getOperand(1), Depth + 1, Q);
857     if (!*ShifterOperandIsNonZero)
858       return;
859   }
860 
861   computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
862 
863   KnownZero.setAllBits();
864   KnownOne.setAllBits();
865   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
866     // Combine the shifted known input bits only for those shift amounts
867     // compatible with its known constraints.
868     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
869       continue;
870     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
871       continue;
872     // If we know the shifter is nonzero, we may be able to infer more known
873     // bits. This check is sunk down as far as possible to avoid the expensive
874     // call to isKnownNonZero if the cheaper checks above fail.
875     if (ShiftAmt == 0) {
876       if (!ShifterOperandIsNonZero.hasValue())
877         ShifterOperandIsNonZero =
878             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
879       if (*ShifterOperandIsNonZero)
880         continue;
881     }
882 
883     KnownZero &= KZF(KnownZero2, ShiftAmt);
884     KnownOne  &= KOF(KnownOne2, ShiftAmt);
885   }
886 
887   // If there are no compatible shift amounts, then we've proven that the shift
888   // amount must be >= the BitWidth, and the result is undefined. We could
889   // return anything we'd like, but we need to make sure the sets of known bits
890   // stay disjoint (it should be better for some other code to actually
891   // propagate the undef than to pick a value here using known bits).
892   if (KnownZero.intersects(KnownOne)) {
893     KnownZero.clearAllBits();
894     KnownOne.clearAllBits();
895   }
896 }
897 
898 static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
899                                          APInt &KnownOne, unsigned Depth,
900                                          const Query &Q) {
901   unsigned BitWidth = KnownZero.getBitWidth();
902 
903   APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
904   switch (I->getOpcode()) {
905   default: break;
906   case Instruction::Load:
907     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
908       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
909     break;
910   case Instruction::And: {
911     // If either the LHS or the RHS are Zero, the result is zero.
912     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
913     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
914 
915     // Output known-1 bits are only known if set in both the LHS & RHS.
916     KnownOne &= KnownOne2;
917     // Output known-0 are known to be clear if zero in either the LHS | RHS.
918     KnownZero |= KnownZero2;
919 
920     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
921     // here we handle the more general case of adding any odd number by
922     // matching the form add(x, add(x, y)) where y is odd.
923     // TODO: This could be generalized to clearing any bit set in y where the
924     // following bit is known to be unset in y.
925     Value *Y = nullptr;
926     if (!KnownZero[0] && !KnownOne[0] &&
927         (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
928                                        m_Value(Y))) ||
929          match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
930                                        m_Value(Y))))) {
931       KnownZero2.clearAllBits(); KnownOne2.clearAllBits();
932       computeKnownBits(Y, KnownZero2, KnownOne2, Depth + 1, Q);
933       if (KnownOne2.countTrailingOnes() > 0)
934         KnownZero.setBit(0);
935     }
936     break;
937   }
938   case Instruction::Or: {
939     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
940     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
941 
942     // Output known-0 bits are only known if clear in both the LHS & RHS.
943     KnownZero &= KnownZero2;
944     // Output known-1 are known to be set if set in either the LHS | RHS.
945     KnownOne |= KnownOne2;
946     break;
947   }
948   case Instruction::Xor: {
949     computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
950     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
951 
952     // Output known-0 bits are known if clear or set in both the LHS & RHS.
953     APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
954     // Output known-1 are known to be set if set in only one of the LHS, RHS.
955     KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
956     KnownZero = std::move(KnownZeroOut);
957     break;
958   }
959   case Instruction::Mul: {
960     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
961     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
962                         KnownOne, KnownZero2, KnownOne2, Depth, Q);
963     break;
964   }
965   case Instruction::UDiv: {
966     // For the purposes of computing leading zeros we can conservatively
967     // treat a udiv as a logical right shift by the power of 2 known to
968     // be less than the denominator.
969     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
970     unsigned LeadZ = KnownZero2.countLeadingOnes();
971 
972     KnownOne2.clearAllBits();
973     KnownZero2.clearAllBits();
974     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
975     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
976     if (RHSUnknownLeadingOnes != BitWidth)
977       LeadZ = std::min(BitWidth,
978                        LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
979 
980     KnownZero.setHighBits(LeadZ);
981     break;
982   }
983   case Instruction::Select: {
984     const Value *LHS, *RHS;
985     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
986     if (SelectPatternResult::isMinOrMax(SPF)) {
987       computeKnownBits(RHS, KnownZero, KnownOne, Depth + 1, Q);
988       computeKnownBits(LHS, KnownZero2, KnownOne2, Depth + 1, Q);
989     } else {
990       computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
991       computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
992     }
993 
994     unsigned MaxHighOnes = 0;
995     unsigned MaxHighZeros = 0;
996     if (SPF == SPF_SMAX) {
997       // If both sides are negative, the result is negative.
998       if (KnownOne.isSignBitSet() && KnownOne2.isSignBitSet())
999         // We can derive a lower bound on the result by taking the max of the
1000         // leading one bits.
1001         MaxHighOnes =
1002             std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
1003       // If either side is non-negative, the result is non-negative.
1004       else if (KnownZero.isSignBitSet() || KnownZero2.isSignBitSet())
1005         MaxHighZeros = 1;
1006     } else if (SPF == SPF_SMIN) {
1007       // If both sides are non-negative, the result is non-negative.
1008       if (KnownZero.isSignBitSet() && KnownZero2.isSignBitSet())
1009         // We can derive an upper bound on the result by taking the max of the
1010         // leading zero bits.
1011         MaxHighZeros = std::max(KnownZero.countLeadingOnes(),
1012                                 KnownZero2.countLeadingOnes());
1013       // If either side is negative, the result is negative.
1014       else if (KnownOne.isSignBitSet() || KnownOne2.isSignBitSet())
1015         MaxHighOnes = 1;
1016     } else if (SPF == SPF_UMAX) {
1017       // We can derive a lower bound on the result by taking the max of the
1018       // leading one bits.
1019       MaxHighOnes =
1020           std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
1021     } else if (SPF == SPF_UMIN) {
1022       // We can derive an upper bound on the result by taking the max of the
1023       // leading zero bits.
1024       MaxHighZeros =
1025           std::max(KnownZero.countLeadingOnes(), KnownZero2.countLeadingOnes());
1026     }
1027 
1028     // Only known if known in both the LHS and RHS.
1029     KnownOne &= KnownOne2;
1030     KnownZero &= KnownZero2;
1031     if (MaxHighOnes > 0)
1032       KnownOne.setHighBits(MaxHighOnes);
1033     if (MaxHighZeros > 0)
1034       KnownZero.setHighBits(MaxHighZeros);
1035     break;
1036   }
1037   case Instruction::FPTrunc:
1038   case Instruction::FPExt:
1039   case Instruction::FPToUI:
1040   case Instruction::FPToSI:
1041   case Instruction::SIToFP:
1042   case Instruction::UIToFP:
1043     break; // Can't work with floating point.
1044   case Instruction::PtrToInt:
1045   case Instruction::IntToPtr:
1046     // Fall through and handle them the same as zext/trunc.
1047     LLVM_FALLTHROUGH;
1048   case Instruction::ZExt:
1049   case Instruction::Trunc: {
1050     Type *SrcTy = I->getOperand(0)->getType();
1051 
1052     unsigned SrcBitWidth;
1053     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1054     // which fall through here.
1055     SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
1056 
1057     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1058     KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
1059     KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
1060     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1061     KnownZero = KnownZero.zextOrTrunc(BitWidth);
1062     KnownOne = KnownOne.zextOrTrunc(BitWidth);
1063     // Any top bits are known to be zero.
1064     if (BitWidth > SrcBitWidth)
1065       KnownZero.setBitsFrom(SrcBitWidth);
1066     break;
1067   }
1068   case Instruction::BitCast: {
1069     Type *SrcTy = I->getOperand(0)->getType();
1070     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1071         // TODO: For now, not handling conversions like:
1072         // (bitcast i64 %x to <2 x i32>)
1073         !I->getType()->isVectorTy()) {
1074       computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1075       break;
1076     }
1077     break;
1078   }
1079   case Instruction::SExt: {
1080     // Compute the bits in the result that are not present in the input.
1081     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1082 
1083     KnownZero = KnownZero.trunc(SrcBitWidth);
1084     KnownOne = KnownOne.trunc(SrcBitWidth);
1085     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1086     // If the sign bit of the input is known set or clear, then we know the
1087     // top bits of the result.
1088     KnownZero = KnownZero.sext(BitWidth);
1089     KnownOne = KnownOne.sext(BitWidth);
1090     break;
1091   }
1092   case Instruction::Shl: {
1093     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1094     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1095     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1096       APInt KZResult = KnownZero << ShiftAmt;
1097       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1098       // If this shift has "nsw" keyword, then the result is either a poison
1099       // value or has the same sign bit as the first operand.
1100       if (NSW && KnownZero.isSignBitSet())
1101         KZResult.setSignBit();
1102       return KZResult;
1103     };
1104 
1105     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1106       APInt KOResult = KnownOne << ShiftAmt;
1107       if (NSW && KnownOne.isSignBitSet())
1108         KOResult.setSignBit();
1109       return KOResult;
1110     };
1111 
1112     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1113                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1114                                       KOF);
1115     break;
1116   }
1117   case Instruction::LShr: {
1118     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1119     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1120       APInt KZResult = KnownZero.lshr(ShiftAmt);
1121       // High bits known zero.
1122       KZResult.setHighBits(ShiftAmt);
1123       return KZResult;
1124     };
1125 
1126     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1127       return KnownOne.lshr(ShiftAmt);
1128     };
1129 
1130     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1131                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1132                                       KOF);
1133     break;
1134   }
1135   case Instruction::AShr: {
1136     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1137     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1138       return KnownZero.ashr(ShiftAmt);
1139     };
1140 
1141     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1142       return KnownOne.ashr(ShiftAmt);
1143     };
1144 
1145     computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
1146                                       KnownZero2, KnownOne2, Depth, Q, KZF,
1147                                       KOF);
1148     break;
1149   }
1150   case Instruction::Sub: {
1151     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1152     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1153                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1154                            Q);
1155     break;
1156   }
1157   case Instruction::Add: {
1158     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1159     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1160                            KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1161                            Q);
1162     break;
1163   }
1164   case Instruction::SRem:
1165     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1166       APInt RA = Rem->getValue().abs();
1167       if (RA.isPowerOf2()) {
1168         APInt LowBits = RA - 1;
1169         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
1170                          Q);
1171 
1172         // The low bits of the first operand are unchanged by the srem.
1173         KnownZero = KnownZero2 & LowBits;
1174         KnownOne = KnownOne2 & LowBits;
1175 
1176         // If the first operand is non-negative or has all low bits zero, then
1177         // the upper bits are all zero.
1178         if (KnownZero2.isSignBitSet() || ((KnownZero2 & LowBits) == LowBits))
1179           KnownZero |= ~LowBits;
1180 
1181         // If the first operand is negative and not all low bits are zero, then
1182         // the upper bits are all one.
1183         if (KnownOne2.isSignBitSet() && ((KnownOne2 & LowBits) != 0))
1184           KnownOne |= ~LowBits;
1185 
1186         assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1187         break;
1188       }
1189     }
1190 
1191     // The sign bit is the LHS's sign bit, except when the result of the
1192     // remainder is zero.
1193     computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1194     // If it's known zero, our sign bit is also zero.
1195     if (KnownZero2.isSignBitSet())
1196       KnownZero.setSignBit();
1197 
1198     break;
1199   case Instruction::URem: {
1200     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1201       const APInt &RA = Rem->getValue();
1202       if (RA.isPowerOf2()) {
1203         APInt LowBits = (RA - 1);
1204         computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1205         KnownZero |= ~LowBits;
1206         KnownOne &= LowBits;
1207         break;
1208       }
1209     }
1210 
1211     // Since the result is less than or equal to either operand, any leading
1212     // zero bits in either operand must also exist in the result.
1213     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1214     computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
1215 
1216     unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
1217                                 KnownZero2.countLeadingOnes());
1218     KnownOne.clearAllBits();
1219     KnownZero.clearAllBits();
1220     KnownZero.setHighBits(Leaders);
1221     break;
1222   }
1223 
1224   case Instruction::Alloca: {
1225     const AllocaInst *AI = cast<AllocaInst>(I);
1226     unsigned Align = AI->getAlignment();
1227     if (Align == 0)
1228       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1229 
1230     if (Align > 0)
1231       KnownZero.setLowBits(countTrailingZeros(Align));
1232     break;
1233   }
1234   case Instruction::GetElementPtr: {
1235     // Analyze all of the subscripts of this getelementptr instruction
1236     // to determine if we can prove known low zero bits.
1237     APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
1238     computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
1239                      Q);
1240     unsigned TrailZ = LocalKnownZero.countTrailingOnes();
1241 
1242     gep_type_iterator GTI = gep_type_begin(I);
1243     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1244       Value *Index = I->getOperand(i);
1245       if (StructType *STy = GTI.getStructTypeOrNull()) {
1246         // Handle struct member offset arithmetic.
1247 
1248         // Handle case when index is vector zeroinitializer
1249         Constant *CIndex = cast<Constant>(Index);
1250         if (CIndex->isZeroValue())
1251           continue;
1252 
1253         if (CIndex->getType()->isVectorTy())
1254           Index = CIndex->getSplatValue();
1255 
1256         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1257         const StructLayout *SL = Q.DL.getStructLayout(STy);
1258         uint64_t Offset = SL->getElementOffset(Idx);
1259         TrailZ = std::min<unsigned>(TrailZ,
1260                                     countTrailingZeros(Offset));
1261       } else {
1262         // Handle array index arithmetic.
1263         Type *IndexedTy = GTI.getIndexedType();
1264         if (!IndexedTy->isSized()) {
1265           TrailZ = 0;
1266           break;
1267         }
1268         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1269         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1270         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
1271         computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
1272         TrailZ = std::min(TrailZ,
1273                           unsigned(countTrailingZeros(TypeSize) +
1274                                    LocalKnownZero.countTrailingOnes()));
1275       }
1276     }
1277 
1278     KnownZero.setLowBits(TrailZ);
1279     break;
1280   }
1281   case Instruction::PHI: {
1282     const PHINode *P = cast<PHINode>(I);
1283     // Handle the case of a simple two-predecessor recurrence PHI.
1284     // There's a lot more that could theoretically be done here, but
1285     // this is sufficient to catch some interesting cases.
1286     if (P->getNumIncomingValues() == 2) {
1287       for (unsigned i = 0; i != 2; ++i) {
1288         Value *L = P->getIncomingValue(i);
1289         Value *R = P->getIncomingValue(!i);
1290         Operator *LU = dyn_cast<Operator>(L);
1291         if (!LU)
1292           continue;
1293         unsigned Opcode = LU->getOpcode();
1294         // Check for operations that have the property that if
1295         // both their operands have low zero bits, the result
1296         // will have low zero bits.
1297         if (Opcode == Instruction::Add ||
1298             Opcode == Instruction::Sub ||
1299             Opcode == Instruction::And ||
1300             Opcode == Instruction::Or ||
1301             Opcode == Instruction::Mul) {
1302           Value *LL = LU->getOperand(0);
1303           Value *LR = LU->getOperand(1);
1304           // Find a recurrence.
1305           if (LL == I)
1306             L = LR;
1307           else if (LR == I)
1308             L = LL;
1309           else
1310             break;
1311           // Ok, we have a PHI of the form L op= R. Check for low
1312           // zero bits.
1313           computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);
1314 
1315           // We need to take the minimum number of known bits
1316           APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
1317           computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);
1318 
1319           KnownZero.setLowBits(std::min(KnownZero2.countTrailingOnes(),
1320                                         KnownZero3.countTrailingOnes()));
1321 
1322           if (DontImproveNonNegativePhiBits)
1323             break;
1324 
1325           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1326           if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
1327             // If initial value of recurrence is nonnegative, and we are adding
1328             // a nonnegative number with nsw, the result can only be nonnegative
1329             // or poison value regardless of the number of times we execute the
1330             // add in phi recurrence. If initial value is negative and we are
1331             // adding a negative number with nsw, the result can only be
1332             // negative or poison value. Similar arguments apply to sub and mul.
1333             //
1334             // (add non-negative, non-negative) --> non-negative
1335             // (add negative, negative) --> negative
1336             if (Opcode == Instruction::Add) {
1337               if (KnownZero2.isSignBitSet() && KnownZero3.isSignBitSet())
1338                 KnownZero.setSignBit();
1339               else if (KnownOne2.isSignBitSet() && KnownOne3.isSignBitSet())
1340                 KnownOne.setSignBit();
1341             }
1342 
1343             // (sub nsw non-negative, negative) --> non-negative
1344             // (sub nsw negative, non-negative) --> negative
1345             else if (Opcode == Instruction::Sub && LL == I) {
1346               if (KnownZero2.isSignBitSet() && KnownOne3.isSignBitSet())
1347                 KnownZero.setSignBit();
1348               else if (KnownOne2.isSignBitSet() && KnownZero3.isSignBitSet())
1349                 KnownOne.setSignBit();
1350             }
1351 
1352             // (mul nsw non-negative, non-negative) --> non-negative
1353             else if (Opcode == Instruction::Mul && KnownZero2.isSignBitSet() &&
1354                      KnownZero3.isSignBitSet())
1355               KnownZero.setSignBit();
1356           }
1357 
1358           break;
1359         }
1360       }
1361     }
1362 
1363     // Unreachable blocks may have zero-operand PHI nodes.
1364     if (P->getNumIncomingValues() == 0)
1365       break;
1366 
1367     // Otherwise take the unions of the known bit sets of the operands,
1368     // taking conservative care to avoid excessive recursion.
1369     if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
1370       // Skip if every incoming value references to ourself.
1371       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1372         break;
1373 
1374       KnownZero.setAllBits();
1375       KnownOne.setAllBits();
1376       for (Value *IncValue : P->incoming_values()) {
1377         // Skip direct self references.
1378         if (IncValue == P) continue;
1379 
1380         KnownZero2 = APInt(BitWidth, 0);
1381         KnownOne2 = APInt(BitWidth, 0);
1382         // Recurse, but cap the recursion to one level, because we don't
1383         // want to waste time spinning around in loops.
1384         computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
1385         KnownZero &= KnownZero2;
1386         KnownOne &= KnownOne2;
1387         // If all bits have been ruled out, there's no need to check
1388         // more operands.
1389         if (!KnownZero && !KnownOne)
1390           break;
1391       }
1392     }
1393     break;
1394   }
1395   case Instruction::Call:
1396   case Instruction::Invoke:
1397     // If range metadata is attached to this call, set known bits from that,
1398     // and then intersect with known bits based on other properties of the
1399     // function.
1400     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1401       computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
1402     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1403       computeKnownBits(RV, KnownZero2, KnownOne2, Depth + 1, Q);
1404       KnownZero |= KnownZero2;
1405       KnownOne |= KnownOne2;
1406     }
1407     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1408       switch (II->getIntrinsicID()) {
1409       default: break;
1410       case Intrinsic::bitreverse:
1411         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1412         KnownZero |= KnownZero2.reverseBits();
1413         KnownOne |= KnownOne2.reverseBits();
1414         break;
1415       case Intrinsic::bswap:
1416         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1417         KnownZero |= KnownZero2.byteSwap();
1418         KnownOne |= KnownOne2.byteSwap();
1419         break;
1420       case Intrinsic::ctlz:
1421       case Intrinsic::cttz: {
1422         unsigned LowBits = Log2_32(BitWidth)+1;
1423         // If this call is undefined for 0, the result will be less than 2^n.
1424         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1425           LowBits -= 1;
1426         KnownZero.setBitsFrom(LowBits);
1427         break;
1428       }
1429       case Intrinsic::ctpop: {
1430         computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
1431         // We can bound the space the count needs.  Also, bits known to be zero
1432         // can't contribute to the population.
1433         unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
1434         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1435         KnownZero.setBitsFrom(LowBits);
1436         // TODO: we could bound KnownOne using the lower bound on the number
1437         // of bits which might be set provided by popcnt KnownOne2.
1438         break;
1439       }
1440       case Intrinsic::x86_sse42_crc32_64_64:
1441         KnownZero.setBitsFrom(32);
1442         break;
1443       }
1444     }
1445     break;
1446   case Instruction::ExtractElement:
1447     // Look through extract element. At the moment we keep this simple and skip
1448     // tracking the specific element. But at least we might find information
1449     // valid for all elements of the vector (for example if vector is sign
1450     // extended, shifted, etc).
1451     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
1452     break;
1453   case Instruction::ExtractValue:
1454     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1455       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1456       if (EVI->getNumIndices() != 1) break;
1457       if (EVI->getIndices()[0] == 0) {
1458         switch (II->getIntrinsicID()) {
1459         default: break;
1460         case Intrinsic::uadd_with_overflow:
1461         case Intrinsic::sadd_with_overflow:
1462           computeKnownBitsAddSub(true, II->getArgOperand(0),
1463                                  II->getArgOperand(1), false, KnownZero,
1464                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1465           break;
1466         case Intrinsic::usub_with_overflow:
1467         case Intrinsic::ssub_with_overflow:
1468           computeKnownBitsAddSub(false, II->getArgOperand(0),
1469                                  II->getArgOperand(1), false, KnownZero,
1470                                  KnownOne, KnownZero2, KnownOne2, Depth, Q);
1471           break;
1472         case Intrinsic::umul_with_overflow:
1473         case Intrinsic::smul_with_overflow:
1474           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1475                               KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
1476                               Q);
1477           break;
1478         }
1479       }
1480     }
1481   }
1482 }
1483 
1484 /// Determine which bits of V are known to be either zero or one and return
1485 /// them in the KnownZero/KnownOne bit sets.
1486 ///
1487 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1488 /// we cannot optimize based on the assumption that it is zero without changing
1489 /// it to be an explicit zero.  If we don't change it to zero, other code could
1490 /// optimized based on the contradictory assumption that it is non-zero.
1491 /// Because instcombine aggressively folds operations with undef args anyway,
1492 /// this won't lose us code quality.
1493 ///
1494 /// This function is defined on values with integer type, values with pointer
1495 /// type, and vectors of integers.  In the case
1496 /// where V is a vector, known zero, and known one values are the
1497 /// same width as the vector element, and the bit is set only if it is true
1498 /// for all of the elements in the vector.
1499 void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
1500                       unsigned Depth, const Query &Q) {
1501   assert(V && "No Value?");
1502   assert(Depth <= MaxDepth && "Limit Search Depth");
1503   unsigned BitWidth = KnownZero.getBitWidth();
1504 
1505   assert((V->getType()->isIntOrIntVectorTy() ||
1506           V->getType()->getScalarType()->isPointerTy()) &&
1507          "Not integer or pointer type!");
1508   assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1509          (!V->getType()->isIntOrIntVectorTy() ||
1510           V->getType()->getScalarSizeInBits() == BitWidth) &&
1511          KnownZero.getBitWidth() == BitWidth &&
1512          KnownOne.getBitWidth() == BitWidth &&
1513          "V, KnownOne and KnownZero should have same BitWidth");
1514   (void)BitWidth;
1515 
1516   const APInt *C;
1517   if (match(V, m_APInt(C))) {
1518     // We know all of the bits for a scalar constant or a splat vector constant!
1519     KnownOne = *C;
1520     KnownZero = ~KnownOne;
1521     return;
1522   }
1523   // Null and aggregate-zero are all-zeros.
1524   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1525     KnownOne.clearAllBits();
1526     KnownZero.setAllBits();
1527     return;
1528   }
1529   // Handle a constant vector by taking the intersection of the known bits of
1530   // each element.
1531   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1532     // We know that CDS must be a vector of integers. Take the intersection of
1533     // each element.
1534     KnownZero.setAllBits(); KnownOne.setAllBits();
1535     APInt Elt(KnownZero.getBitWidth(), 0);
1536     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1537       Elt = CDS->getElementAsInteger(i);
1538       KnownZero &= ~Elt;
1539       KnownOne &= Elt;
1540     }
1541     return;
1542   }
1543 
1544   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1545     // We know that CV must be a vector of integers. Take the intersection of
1546     // each element.
1547     KnownZero.setAllBits(); KnownOne.setAllBits();
1548     APInt Elt(KnownZero.getBitWidth(), 0);
1549     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1550       Constant *Element = CV->getAggregateElement(i);
1551       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1552       if (!ElementCI) {
1553         KnownZero.clearAllBits();
1554         KnownOne.clearAllBits();
1555         return;
1556       }
1557       Elt = ElementCI->getValue();
1558       KnownZero &= ~Elt;
1559       KnownOne &= Elt;
1560     }
1561     return;
1562   }
1563 
1564   // Start out not knowing anything.
1565   KnownZero.clearAllBits(); KnownOne.clearAllBits();
1566 
1567   // We can't imply anything about undefs.
1568   if (isa<UndefValue>(V))
1569     return;
1570 
1571   // There's no point in looking through other users of ConstantData for
1572   // assumptions.  Confirm that we've handled them all.
1573   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1574 
1575   // Limit search depth.
1576   // All recursive calls that increase depth must come after this.
1577   if (Depth == MaxDepth)
1578     return;
1579 
1580   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1581   // the bits of its aliasee.
1582   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1583     if (!GA->isInterposable())
1584       computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
1585     return;
1586   }
1587 
1588   if (const Operator *I = dyn_cast<Operator>(V))
1589     computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
1590 
1591   // Aligned pointers have trailing zeros - refine KnownZero set
1592   if (V->getType()->isPointerTy()) {
1593     unsigned Align = V->getPointerAlignment(Q.DL);
1594     if (Align)
1595       KnownZero.setLowBits(countTrailingZeros(Align));
1596   }
1597 
1598   // computeKnownBitsFromAssume strictly refines KnownZero and
1599   // KnownOne. Therefore, we run them after computeKnownBitsFromOperator.
1600 
1601   // Check whether a nearby assume intrinsic can determine some known bits.
1602   computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);
1603 
1604   assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1605 }
1606 
1607 /// Determine whether the sign bit is known to be zero or one.
1608 /// Convenience wrapper around computeKnownBits.
1609 void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
1610                     unsigned Depth, const Query &Q) {
1611   unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
1612   if (!BitWidth) {
1613     KnownZero = false;
1614     KnownOne = false;
1615     return;
1616   }
1617   APInt ZeroBits(BitWidth, 0);
1618   APInt OneBits(BitWidth, 0);
1619   computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
1620   KnownOne = OneBits.isSignBitSet();
1621   KnownZero = ZeroBits.isSignBitSet();
1622 }
1623 
1624 /// Return true if the given value is known to have exactly one
1625 /// bit set when defined. For vectors return true if every element is known to
1626 /// be a power of two when defined. Supports values with integer or pointer
1627 /// types and vectors of integers.
1628 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1629                             const Query &Q) {
1630   if (const Constant *C = dyn_cast<Constant>(V)) {
1631     if (C->isNullValue())
1632       return OrZero;
1633 
1634     const APInt *ConstIntOrConstSplatInt;
1635     if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1636       return ConstIntOrConstSplatInt->isPowerOf2();
1637   }
1638 
1639   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1640   // it is shifted off the end then the result is undefined.
1641   if (match(V, m_Shl(m_One(), m_Value())))
1642     return true;
1643 
1644   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1645   // the bottom.  If it is shifted off the bottom then the result is undefined.
1646   if (match(V, m_LShr(m_SignMask(), m_Value())))
1647     return true;
1648 
1649   // The remaining tests are all recursive, so bail out if we hit the limit.
1650   if (Depth++ == MaxDepth)
1651     return false;
1652 
1653   Value *X = nullptr, *Y = nullptr;
1654   // A shift left or a logical shift right of a power of two is a power of two
1655   // or zero.
1656   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1657                  match(V, m_LShr(m_Value(X), m_Value()))))
1658     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1659 
1660   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1661     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1662 
1663   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1664     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1665            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1666 
1667   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1668     // A power of two and'd with anything is a power of two or zero.
1669     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1670         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1671       return true;
1672     // X & (-X) is always a power of two or zero.
1673     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1674       return true;
1675     return false;
1676   }
1677 
1678   // Adding a power-of-two or zero to the same power-of-two or zero yields
1679   // either the original power-of-two, a larger power-of-two or zero.
1680   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1681     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1682     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1683       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1684           match(X, m_And(m_Value(), m_Specific(Y))))
1685         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1686           return true;
1687       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1688           match(Y, m_And(m_Value(), m_Specific(X))))
1689         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1690           return true;
1691 
1692       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1693       APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1694       computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);
1695 
1696       APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1697       computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
1698       // If i8 V is a power of two or zero:
1699       //  ZeroBits: 1 1 1 0 1 1 1 1
1700       // ~ZeroBits: 0 0 0 1 0 0 0 0
1701       if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1702         // If OrZero isn't set, we cannot give back a zero result.
1703         // Make sure either the LHS or RHS has a bit set.
1704         if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1705           return true;
1706     }
1707   }
1708 
1709   // An exact divide or right shift can only shift off zero bits, so the result
1710   // is a power of two only if the first operand is a power of two and not
1711   // copying a sign bit (sdiv int_min, 2).
1712   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1713       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1714     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1715                                   Depth, Q);
1716   }
1717 
1718   return false;
1719 }
1720 
1721 /// \brief Test whether a GEP's result is known to be non-null.
1722 ///
1723 /// Uses properties inherent in a GEP to try to determine whether it is known
1724 /// to be non-null.
1725 ///
1726 /// Currently this routine does not support vector GEPs.
1727 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1728                               const Query &Q) {
1729   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1730     return false;
1731 
1732   // FIXME: Support vector-GEPs.
1733   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1734 
1735   // If the base pointer is non-null, we cannot walk to a null address with an
1736   // inbounds GEP in address space zero.
1737   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1738     return true;
1739 
1740   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1741   // If so, then the GEP cannot produce a null pointer, as doing so would
1742   // inherently violate the inbounds contract within address space zero.
1743   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1744        GTI != GTE; ++GTI) {
1745     // Struct types are easy -- they must always be indexed by a constant.
1746     if (StructType *STy = GTI.getStructTypeOrNull()) {
1747       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1748       unsigned ElementIdx = OpC->getZExtValue();
1749       const StructLayout *SL = Q.DL.getStructLayout(STy);
1750       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1751       if (ElementOffset > 0)
1752         return true;
1753       continue;
1754     }
1755 
1756     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1757     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1758       continue;
1759 
1760     // Fast path the constant operand case both for efficiency and so we don't
1761     // increment Depth when just zipping down an all-constant GEP.
1762     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1763       if (!OpC->isZero())
1764         return true;
1765       continue;
1766     }
1767 
1768     // We post-increment Depth here because while isKnownNonZero increments it
1769     // as well, when we pop back up that increment won't persist. We don't want
1770     // to recurse 10k times just because we have 10k GEP operands. We don't
1771     // bail completely out because we want to handle constant GEPs regardless
1772     // of depth.
1773     if (Depth++ >= MaxDepth)
1774       continue;
1775 
1776     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1777       return true;
1778   }
1779 
1780   return false;
1781 }
1782 
1783 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1784 /// ensure that the value it's attached to is never Value?  'RangeType' is
1785 /// is the type of the value described by the range.
1786 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1787   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1788   assert(NumRanges >= 1);
1789   for (unsigned i = 0; i < NumRanges; ++i) {
1790     ConstantInt *Lower =
1791         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1792     ConstantInt *Upper =
1793         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1794     ConstantRange Range(Lower->getValue(), Upper->getValue());
1795     if (Range.contains(Value))
1796       return false;
1797   }
1798   return true;
1799 }
1800 
1801 /// Return true if the given value is known to be non-zero when defined. For
1802 /// vectors, return true if every element is known to be non-zero when
1803 /// defined. For pointers, if the context instruction and dominator tree are
1804 /// specified, perform context-sensitive analysis and return true if the
1805 /// pointer couldn't possibly be null at the specified instruction.
1806 /// Supports values with integer or pointer type and vectors of integers.
1807 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1808   if (auto *C = dyn_cast<Constant>(V)) {
1809     if (C->isNullValue())
1810       return false;
1811     if (isa<ConstantInt>(C))
1812       // Must be non-zero due to null test above.
1813       return true;
1814 
1815     // For constant vectors, check that all elements are undefined or known
1816     // non-zero to determine that the whole vector is known non-zero.
1817     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1818       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1819         Constant *Elt = C->getAggregateElement(i);
1820         if (!Elt || Elt->isNullValue())
1821           return false;
1822         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1823           return false;
1824       }
1825       return true;
1826     }
1827 
1828     return false;
1829   }
1830 
1831   if (auto *I = dyn_cast<Instruction>(V)) {
1832     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1833       // If the possible ranges don't contain zero, then the value is
1834       // definitely non-zero.
1835       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1836         const APInt ZeroValue(Ty->getBitWidth(), 0);
1837         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1838           return true;
1839       }
1840     }
1841   }
1842 
1843   // The remaining tests are all recursive, so bail out if we hit the limit.
1844   if (Depth++ >= MaxDepth)
1845     return false;
1846 
1847   // Check for pointer simplifications.
1848   if (V->getType()->isPointerTy()) {
1849     if (isKnownNonNullAt(V, Q.CxtI, Q.DT))
1850       return true;
1851     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1852       if (isGEPKnownNonNull(GEP, Depth, Q))
1853         return true;
1854   }
1855 
1856   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1857 
1858   // X | Y != 0 if X != 0 or Y != 0.
1859   Value *X = nullptr, *Y = nullptr;
1860   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1861     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1862 
1863   // ext X != 0 if X != 0.
1864   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1865     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1866 
1867   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1868   // if the lowest bit is shifted off the end.
1869   if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1870     // shl nuw can't remove any non-zero bits.
1871     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1872     if (BO->hasNoUnsignedWrap())
1873       return isKnownNonZero(X, Depth, Q);
1874 
1875     APInt KnownZero(BitWidth, 0);
1876     APInt KnownOne(BitWidth, 0);
1877     computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1878     if (KnownOne[0])
1879       return true;
1880   }
1881   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1882   // defined if the sign bit is shifted off the end.
1883   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1884     // shr exact can only shift out zero bits.
1885     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1886     if (BO->isExact())
1887       return isKnownNonZero(X, Depth, Q);
1888 
1889     bool XKnownNonNegative, XKnownNegative;
1890     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1891     if (XKnownNegative)
1892       return true;
1893 
1894     // If the shifter operand is a constant, and all of the bits shifted
1895     // out are known to be zero, and X is known non-zero then at least one
1896     // non-zero bit must remain.
1897     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1898       APInt KnownZero(BitWidth, 0);
1899       APInt KnownOne(BitWidth, 0);
1900       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1901 
1902       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1903       // Is there a known one in the portion not shifted out?
1904       if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1905         return true;
1906       // Are all the bits to be shifted out known zero?
1907       if (KnownZero.countTrailingOnes() >= ShiftVal)
1908         return isKnownNonZero(X, Depth, Q);
1909     }
1910   }
1911   // div exact can only produce a zero if the dividend is zero.
1912   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1913     return isKnownNonZero(X, Depth, Q);
1914   }
1915   // X + Y.
1916   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1917     bool XKnownNonNegative, XKnownNegative;
1918     bool YKnownNonNegative, YKnownNegative;
1919     ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
1920     ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);
1921 
1922     // If X and Y are both non-negative (as signed values) then their sum is not
1923     // zero unless both X and Y are zero.
1924     if (XKnownNonNegative && YKnownNonNegative)
1925       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1926         return true;
1927 
1928     // If X and Y are both negative (as signed values) then their sum is not
1929     // zero unless both X and Y equal INT_MIN.
1930     if (BitWidth && XKnownNegative && YKnownNegative) {
1931       APInt KnownZero(BitWidth, 0);
1932       APInt KnownOne(BitWidth, 0);
1933       APInt Mask = APInt::getSignedMaxValue(BitWidth);
1934       // The sign bit of X is set.  If some other bit is set then X is not equal
1935       // to INT_MIN.
1936       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
1937       if ((KnownOne & Mask) != 0)
1938         return true;
1939       // The sign bit of Y is set.  If some other bit is set then Y is not equal
1940       // to INT_MIN.
1941       computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
1942       if ((KnownOne & Mask) != 0)
1943         return true;
1944     }
1945 
1946     // The sum of a non-negative number and a power of two is not zero.
1947     if (XKnownNonNegative &&
1948         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1949       return true;
1950     if (YKnownNonNegative &&
1951         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1952       return true;
1953   }
1954   // X * Y.
1955   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1956     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1957     // If X and Y are non-zero then so is X * Y as long as the multiplication
1958     // does not overflow.
1959     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1960         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1961       return true;
1962   }
1963   // (C ? X : Y) != 0 if X != 0 and Y != 0.
1964   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
1965     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1966         isKnownNonZero(SI->getFalseValue(), Depth, Q))
1967       return true;
1968   }
1969   // PHI
1970   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
1971     // Try and detect a recurrence that monotonically increases from a
1972     // starting value, as these are common as induction variables.
1973     if (PN->getNumIncomingValues() == 2) {
1974       Value *Start = PN->getIncomingValue(0);
1975       Value *Induction = PN->getIncomingValue(1);
1976       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1977         std::swap(Start, Induction);
1978       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1979         if (!C->isZero() && !C->isNegative()) {
1980           ConstantInt *X;
1981           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1982                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1983               !X->isNegative())
1984             return true;
1985         }
1986       }
1987     }
1988     // Check if all incoming values are non-zero constant.
1989     bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
1990       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
1991     });
1992     if (AllNonZeroConstants)
1993       return true;
1994   }
1995 
1996   if (!BitWidth) return false;
1997   APInt KnownZero(BitWidth, 0);
1998   APInt KnownOne(BitWidth, 0);
1999   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2000   return KnownOne != 0;
2001 }
2002 
2003 /// Return true if V2 == V1 + X, where X is known non-zero.
2004 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2005   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2006   if (!BO || BO->getOpcode() != Instruction::Add)
2007     return false;
2008   Value *Op = nullptr;
2009   if (V2 == BO->getOperand(0))
2010     Op = BO->getOperand(1);
2011   else if (V2 == BO->getOperand(1))
2012     Op = BO->getOperand(0);
2013   else
2014     return false;
2015   return isKnownNonZero(Op, 0, Q);
2016 }
2017 
2018 /// Return true if it is known that V1 != V2.
2019 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2020   if (V1->getType()->isVectorTy() || V1 == V2)
2021     return false;
2022   if (V1->getType() != V2->getType())
2023     // We can't look through casts yet.
2024     return false;
2025   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2026     return true;
2027 
2028   if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
2029     // Are any known bits in V1 contradictory to known bits in V2? If V1
2030     // has a known zero where V2 has a known one, they must not be equal.
2031     auto BitWidth = Ty->getBitWidth();
2032     APInt KnownZero1(BitWidth, 0);
2033     APInt KnownOne1(BitWidth, 0);
2034     computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
2035     APInt KnownZero2(BitWidth, 0);
2036     APInt KnownOne2(BitWidth, 0);
2037     computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);
2038 
2039     auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
2040     if (OppositeBits.getBoolValue())
2041       return true;
2042   }
2043   return false;
2044 }
2045 
2046 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2047 /// simplify operations downstream. Mask is known to be zero for bits that V
2048 /// cannot have.
2049 ///
2050 /// This function is defined on values with integer type, values with pointer
2051 /// type, and vectors of integers.  In the case
2052 /// where V is a vector, the mask, known zero, and known one values are the
2053 /// same width as the vector element, and the bit is set only if it is true
2054 /// for all of the elements in the vector.
2055 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2056                        const Query &Q) {
2057   APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
2058   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2059   return (KnownZero & Mask) == Mask;
2060 }
2061 
2062 /// For vector constants, loop over the elements and find the constant with the
2063 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2064 /// or if any element was not analyzed; otherwise, return the count for the
2065 /// element with the minimum number of sign bits.
2066 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2067                                                  unsigned TyBits) {
2068   const auto *CV = dyn_cast<Constant>(V);
2069   if (!CV || !CV->getType()->isVectorTy())
2070     return 0;
2071 
2072   unsigned MinSignBits = TyBits;
2073   unsigned NumElts = CV->getType()->getVectorNumElements();
2074   for (unsigned i = 0; i != NumElts; ++i) {
2075     // If we find a non-ConstantInt, bail out.
2076     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2077     if (!Elt)
2078       return 0;
2079 
2080     // If the sign bit is 1, flip the bits, so we always count leading zeros.
2081     APInt EltVal = Elt->getValue();
2082     if (EltVal.isNegative())
2083       EltVal = ~EltVal;
2084     MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
2085   }
2086 
2087   return MinSignBits;
2088 }
2089 
2090 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2091                                        const Query &Q);
2092 
2093 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2094                                    const Query &Q) {
2095   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2096   assert(Result > 0 && "At least one sign bit needs to be present!");
2097   return Result;
2098 }
2099 
2100 /// Return the number of times the sign bit of the register is replicated into
2101 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2102 /// (itself), but other cases can give us information. For example, immediately
2103 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2104 /// other, so we return 3. For vectors, return the number of sign bits for the
2105 /// vector element with the mininum number of known sign bits.
2106 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2107                                        const Query &Q) {
2108 
2109   // We return the minimum number of sign bits that are guaranteed to be present
2110   // in V, so for undef we have to conservatively return 1.  We don't have the
2111   // same behavior for poison though -- that's a FIXME today.
2112 
2113   unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2114   unsigned Tmp, Tmp2;
2115   unsigned FirstAnswer = 1;
2116 
2117   // Note that ConstantInt is handled by the general computeKnownBits case
2118   // below.
2119 
2120   if (Depth == MaxDepth)
2121     return 1;  // Limit search depth.
2122 
2123   const Operator *U = dyn_cast<Operator>(V);
2124   switch (Operator::getOpcode(V)) {
2125   default: break;
2126   case Instruction::SExt:
2127     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2128     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2129 
2130   case Instruction::SDiv: {
2131     const APInt *Denominator;
2132     // sdiv X, C -> adds log(C) sign bits.
2133     if (match(U->getOperand(1), m_APInt(Denominator))) {
2134 
2135       // Ignore non-positive denominator.
2136       if (!Denominator->isStrictlyPositive())
2137         break;
2138 
2139       // Calculate the incoming numerator bits.
2140       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2141 
2142       // Add floor(log(C)) bits to the numerator bits.
2143       return std::min(TyBits, NumBits + Denominator->logBase2());
2144     }
2145     break;
2146   }
2147 
2148   case Instruction::SRem: {
2149     const APInt *Denominator;
2150     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2151     // positive constant.  This let us put a lower bound on the number of sign
2152     // bits.
2153     if (match(U->getOperand(1), m_APInt(Denominator))) {
2154 
2155       // Ignore non-positive denominator.
2156       if (!Denominator->isStrictlyPositive())
2157         break;
2158 
2159       // Calculate the incoming numerator bits. SRem by a positive constant
2160       // can't lower the number of sign bits.
2161       unsigned NumrBits =
2162           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2163 
2164       // Calculate the leading sign bit constraints by examining the
2165       // denominator.  Given that the denominator is positive, there are two
2166       // cases:
2167       //
2168       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2169       //     (1 << ceilLogBase2(C)).
2170       //
2171       //  2. the numerator is negative.  Then the result range is (-C,0] and
2172       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2173       //
2174       // Thus a lower bound on the number of sign bits is `TyBits -
2175       // ceilLogBase2(C)`.
2176 
2177       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2178       return std::max(NumrBits, ResBits);
2179     }
2180     break;
2181   }
2182 
2183   case Instruction::AShr: {
2184     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2185     // ashr X, C   -> adds C sign bits.  Vectors too.
2186     const APInt *ShAmt;
2187     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2188       unsigned ShAmtLimited = ShAmt->getZExtValue();
2189       if (ShAmtLimited >= TyBits)
2190         break;  // Bad shift.
2191       Tmp += ShAmtLimited;
2192       if (Tmp > TyBits) Tmp = TyBits;
2193     }
2194     return Tmp;
2195   }
2196   case Instruction::Shl: {
2197     const APInt *ShAmt;
2198     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2199       // shl destroys sign bits.
2200       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2201       Tmp2 = ShAmt->getZExtValue();
2202       if (Tmp2 >= TyBits ||      // Bad shift.
2203           Tmp2 >= Tmp) break;    // Shifted all sign bits out.
2204       return Tmp - Tmp2;
2205     }
2206     break;
2207   }
2208   case Instruction::And:
2209   case Instruction::Or:
2210   case Instruction::Xor:    // NOT is handled here.
2211     // Logical binary ops preserve the number of sign bits at the worst.
2212     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2213     if (Tmp != 1) {
2214       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2215       FirstAnswer = std::min(Tmp, Tmp2);
2216       // We computed what we know about the sign bits as our first
2217       // answer. Now proceed to the generic code that uses
2218       // computeKnownBits, and pick whichever answer is better.
2219     }
2220     break;
2221 
2222   case Instruction::Select:
2223     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2224     if (Tmp == 1) return 1;  // Early out.
2225     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2226     return std::min(Tmp, Tmp2);
2227 
2228   case Instruction::Add:
2229     // Add can have at most one carry bit.  Thus we know that the output
2230     // is, at worst, one more bit than the inputs.
2231     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2232     if (Tmp == 1) return 1;  // Early out.
2233 
2234     // Special case decrementing a value (ADD X, -1):
2235     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2236       if (CRHS->isAllOnesValue()) {
2237         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2238         computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
2239 
2240         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2241         // sign bits set.
2242         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2243           return TyBits;
2244 
2245         // If we are subtracting one from a positive number, there is no carry
2246         // out of the result.
2247         if (KnownZero.isSignBitSet())
2248           return Tmp;
2249       }
2250 
2251     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2252     if (Tmp2 == 1) return 1;
2253     return std::min(Tmp, Tmp2)-1;
2254 
2255   case Instruction::Sub:
2256     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2257     if (Tmp2 == 1) return 1;
2258 
2259     // Handle NEG.
2260     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2261       if (CLHS->isNullValue()) {
2262         APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2263         computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
2264         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2265         // sign bits set.
2266         if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2267           return TyBits;
2268 
2269         // If the input is known to be positive (the sign bit is known clear),
2270         // the output of the NEG has the same number of sign bits as the input.
2271         if (KnownZero.isSignBitSet())
2272           return Tmp2;
2273 
2274         // Otherwise, we treat this like a SUB.
2275       }
2276 
2277     // Sub can have at most one carry bit.  Thus we know that the output
2278     // is, at worst, one more bit than the inputs.
2279     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2280     if (Tmp == 1) return 1;  // Early out.
2281     return std::min(Tmp, Tmp2)-1;
2282 
2283   case Instruction::PHI: {
2284     const PHINode *PN = cast<PHINode>(U);
2285     unsigned NumIncomingValues = PN->getNumIncomingValues();
2286     // Don't analyze large in-degree PHIs.
2287     if (NumIncomingValues > 4) break;
2288     // Unreachable blocks may have zero-operand PHI nodes.
2289     if (NumIncomingValues == 0) break;
2290 
2291     // Take the minimum of all incoming values.  This can't infinitely loop
2292     // because of our depth threshold.
2293     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2294     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2295       if (Tmp == 1) return Tmp;
2296       Tmp = std::min(
2297           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2298     }
2299     return Tmp;
2300   }
2301 
2302   case Instruction::Trunc:
2303     // FIXME: it's tricky to do anything useful for this, but it is an important
2304     // case for targets like X86.
2305     break;
2306 
2307   case Instruction::ExtractElement:
2308     // Look through extract element. At the moment we keep this simple and skip
2309     // tracking the specific element. But at least we might find information
2310     // valid for all elements of the vector (for example if vector is sign
2311     // extended, shifted, etc).
2312     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2313   }
2314 
2315   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2316   // use this information.
2317 
2318   // If we can examine all elements of a vector constant successfully, we're
2319   // done (we can't do any better than that). If not, keep trying.
2320   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2321     return VecSignBits;
2322 
2323   APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2324   computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
2325 
2326   // If we know that the sign bit is either zero or one, determine the number of
2327   // identical bits in the top of the input value.
2328   if (KnownZero.isSignBitSet())
2329     return std::max(FirstAnswer, KnownZero.countLeadingOnes());
2330 
2331   if (KnownOne.isSignBitSet())
2332     return std::max(FirstAnswer, KnownOne.countLeadingOnes());
2333 
2334   // computeKnownBits gave us no extra information about the top bits.
2335   return FirstAnswer;
2336 }
2337 
2338 /// This function computes the integer multiple of Base that equals V.
2339 /// If successful, it returns true and returns the multiple in
2340 /// Multiple. If unsuccessful, it returns false. It looks
2341 /// through SExt instructions only if LookThroughSExt is true.
2342 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2343                            bool LookThroughSExt, unsigned Depth) {
2344   const unsigned MaxDepth = 6;
2345 
2346   assert(V && "No Value?");
2347   assert(Depth <= MaxDepth && "Limit Search Depth");
2348   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2349 
2350   Type *T = V->getType();
2351 
2352   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2353 
2354   if (Base == 0)
2355     return false;
2356 
2357   if (Base == 1) {
2358     Multiple = V;
2359     return true;
2360   }
2361 
2362   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2363   Constant *BaseVal = ConstantInt::get(T, Base);
2364   if (CO && CO == BaseVal) {
2365     // Multiple is 1.
2366     Multiple = ConstantInt::get(T, 1);
2367     return true;
2368   }
2369 
2370   if (CI && CI->getZExtValue() % Base == 0) {
2371     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2372     return true;
2373   }
2374 
2375   if (Depth == MaxDepth) return false;  // Limit search depth.
2376 
2377   Operator *I = dyn_cast<Operator>(V);
2378   if (!I) return false;
2379 
2380   switch (I->getOpcode()) {
2381   default: break;
2382   case Instruction::SExt:
2383     if (!LookThroughSExt) return false;
2384     // otherwise fall through to ZExt
2385   case Instruction::ZExt:
2386     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2387                            LookThroughSExt, Depth+1);
2388   case Instruction::Shl:
2389   case Instruction::Mul: {
2390     Value *Op0 = I->getOperand(0);
2391     Value *Op1 = I->getOperand(1);
2392 
2393     if (I->getOpcode() == Instruction::Shl) {
2394       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2395       if (!Op1CI) return false;
2396       // Turn Op0 << Op1 into Op0 * 2^Op1
2397       APInt Op1Int = Op1CI->getValue();
2398       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2399       APInt API(Op1Int.getBitWidth(), 0);
2400       API.setBit(BitToSet);
2401       Op1 = ConstantInt::get(V->getContext(), API);
2402     }
2403 
2404     Value *Mul0 = nullptr;
2405     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2406       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2407         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2408           if (Op1C->getType()->getPrimitiveSizeInBits() <
2409               MulC->getType()->getPrimitiveSizeInBits())
2410             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2411           if (Op1C->getType()->getPrimitiveSizeInBits() >
2412               MulC->getType()->getPrimitiveSizeInBits())
2413             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2414 
2415           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2416           Multiple = ConstantExpr::getMul(MulC, Op1C);
2417           return true;
2418         }
2419 
2420       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2421         if (Mul0CI->getValue() == 1) {
2422           // V == Base * Op1, so return Op1
2423           Multiple = Op1;
2424           return true;
2425         }
2426     }
2427 
2428     Value *Mul1 = nullptr;
2429     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2430       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2431         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2432           if (Op0C->getType()->getPrimitiveSizeInBits() <
2433               MulC->getType()->getPrimitiveSizeInBits())
2434             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2435           if (Op0C->getType()->getPrimitiveSizeInBits() >
2436               MulC->getType()->getPrimitiveSizeInBits())
2437             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2438 
2439           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2440           Multiple = ConstantExpr::getMul(MulC, Op0C);
2441           return true;
2442         }
2443 
2444       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2445         if (Mul1CI->getValue() == 1) {
2446           // V == Base * Op0, so return Op0
2447           Multiple = Op0;
2448           return true;
2449         }
2450     }
2451   }
2452   }
2453 
2454   // We could not determine if V is a multiple of Base.
2455   return false;
2456 }
2457 
2458 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2459                                             const TargetLibraryInfo *TLI) {
2460   const Function *F = ICS.getCalledFunction();
2461   if (!F)
2462     return Intrinsic::not_intrinsic;
2463 
2464   if (F->isIntrinsic())
2465     return F->getIntrinsicID();
2466 
2467   if (!TLI)
2468     return Intrinsic::not_intrinsic;
2469 
2470   LibFunc Func;
2471   // We're going to make assumptions on the semantics of the functions, check
2472   // that the target knows that it's available in this environment and it does
2473   // not have local linkage.
2474   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2475     return Intrinsic::not_intrinsic;
2476 
2477   if (!ICS.onlyReadsMemory())
2478     return Intrinsic::not_intrinsic;
2479 
2480   // Otherwise check if we have a call to a function that can be turned into a
2481   // vector intrinsic.
2482   switch (Func) {
2483   default:
2484     break;
2485   case LibFunc_sin:
2486   case LibFunc_sinf:
2487   case LibFunc_sinl:
2488     return Intrinsic::sin;
2489   case LibFunc_cos:
2490   case LibFunc_cosf:
2491   case LibFunc_cosl:
2492     return Intrinsic::cos;
2493   case LibFunc_exp:
2494   case LibFunc_expf:
2495   case LibFunc_expl:
2496     return Intrinsic::exp;
2497   case LibFunc_exp2:
2498   case LibFunc_exp2f:
2499   case LibFunc_exp2l:
2500     return Intrinsic::exp2;
2501   case LibFunc_log:
2502   case LibFunc_logf:
2503   case LibFunc_logl:
2504     return Intrinsic::log;
2505   case LibFunc_log10:
2506   case LibFunc_log10f:
2507   case LibFunc_log10l:
2508     return Intrinsic::log10;
2509   case LibFunc_log2:
2510   case LibFunc_log2f:
2511   case LibFunc_log2l:
2512     return Intrinsic::log2;
2513   case LibFunc_fabs:
2514   case LibFunc_fabsf:
2515   case LibFunc_fabsl:
2516     return Intrinsic::fabs;
2517   case LibFunc_fmin:
2518   case LibFunc_fminf:
2519   case LibFunc_fminl:
2520     return Intrinsic::minnum;
2521   case LibFunc_fmax:
2522   case LibFunc_fmaxf:
2523   case LibFunc_fmaxl:
2524     return Intrinsic::maxnum;
2525   case LibFunc_copysign:
2526   case LibFunc_copysignf:
2527   case LibFunc_copysignl:
2528     return Intrinsic::copysign;
2529   case LibFunc_floor:
2530   case LibFunc_floorf:
2531   case LibFunc_floorl:
2532     return Intrinsic::floor;
2533   case LibFunc_ceil:
2534   case LibFunc_ceilf:
2535   case LibFunc_ceill:
2536     return Intrinsic::ceil;
2537   case LibFunc_trunc:
2538   case LibFunc_truncf:
2539   case LibFunc_truncl:
2540     return Intrinsic::trunc;
2541   case LibFunc_rint:
2542   case LibFunc_rintf:
2543   case LibFunc_rintl:
2544     return Intrinsic::rint;
2545   case LibFunc_nearbyint:
2546   case LibFunc_nearbyintf:
2547   case LibFunc_nearbyintl:
2548     return Intrinsic::nearbyint;
2549   case LibFunc_round:
2550   case LibFunc_roundf:
2551   case LibFunc_roundl:
2552     return Intrinsic::round;
2553   case LibFunc_pow:
2554   case LibFunc_powf:
2555   case LibFunc_powl:
2556     return Intrinsic::pow;
2557   case LibFunc_sqrt:
2558   case LibFunc_sqrtf:
2559   case LibFunc_sqrtl:
2560     if (ICS->hasNoNaNs())
2561       return Intrinsic::sqrt;
2562     return Intrinsic::not_intrinsic;
2563   }
2564 
2565   return Intrinsic::not_intrinsic;
2566 }
2567 
2568 /// Return true if we can prove that the specified FP value is never equal to
2569 /// -0.0.
2570 ///
2571 /// NOTE: this function will need to be revisited when we support non-default
2572 /// rounding modes!
2573 ///
2574 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2575                                 unsigned Depth) {
2576   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2577     return !CFP->getValueAPF().isNegZero();
2578 
2579   if (Depth == MaxDepth)
2580     return false;  // Limit search depth.
2581 
2582   const Operator *I = dyn_cast<Operator>(V);
2583   if (!I) return false;
2584 
2585   // Check if the nsz fast-math flag is set
2586   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2587     if (FPO->hasNoSignedZeros())
2588       return true;
2589 
2590   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2591   if (I->getOpcode() == Instruction::FAdd)
2592     if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2593       if (CFP->isNullValue())
2594         return true;
2595 
2596   // sitofp and uitofp turn into +0.0 for zero.
2597   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2598     return true;
2599 
2600   if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2601     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2602     switch (IID) {
2603     default:
2604       break;
2605     // sqrt(-0.0) = -0.0, no other negative results are possible.
2606     case Intrinsic::sqrt:
2607       return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2608     // fabs(x) != -0.0
2609     case Intrinsic::fabs:
2610       return true;
2611     }
2612   }
2613 
2614   return false;
2615 }
2616 
2617 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2618 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2619 /// bit despite comparing equal.
2620 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2621                                             const TargetLibraryInfo *TLI,
2622                                             bool SignBitOnly,
2623                                             unsigned Depth) {
2624   // TODO: This function does not do the right thing when SignBitOnly is true
2625   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2626   // which flips the sign bits of NaNs.  See
2627   // https://llvm.org/bugs/show_bug.cgi?id=31702.
2628 
2629   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2630     return !CFP->getValueAPF().isNegative() ||
2631            (!SignBitOnly && CFP->getValueAPF().isZero());
2632   }
2633 
2634   if (Depth == MaxDepth)
2635     return false; // Limit search depth.
2636 
2637   const Operator *I = dyn_cast<Operator>(V);
2638   if (!I)
2639     return false;
2640 
2641   switch (I->getOpcode()) {
2642   default:
2643     break;
2644   // Unsigned integers are always nonnegative.
2645   case Instruction::UIToFP:
2646     return true;
2647   case Instruction::FMul:
2648     // x*x is always non-negative or a NaN.
2649     if (I->getOperand(0) == I->getOperand(1) &&
2650         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2651       return true;
2652 
2653     LLVM_FALLTHROUGH;
2654   case Instruction::FAdd:
2655   case Instruction::FDiv:
2656   case Instruction::FRem:
2657     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2658                                            Depth + 1) &&
2659            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2660                                            Depth + 1);
2661   case Instruction::Select:
2662     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2663                                            Depth + 1) &&
2664            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2665                                            Depth + 1);
2666   case Instruction::FPExt:
2667   case Instruction::FPTrunc:
2668     // Widening/narrowing never change sign.
2669     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2670                                            Depth + 1);
2671   case Instruction::Call:
2672     const auto *CI = cast<CallInst>(I);
2673     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2674     switch (IID) {
2675     default:
2676       break;
2677     case Intrinsic::maxnum:
2678       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2679                                              Depth + 1) ||
2680              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2681                                              Depth + 1);
2682     case Intrinsic::minnum:
2683       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2684                                              Depth + 1) &&
2685              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2686                                              Depth + 1);
2687     case Intrinsic::exp:
2688     case Intrinsic::exp2:
2689     case Intrinsic::fabs:
2690       return true;
2691 
2692     case Intrinsic::sqrt:
2693       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
2694       if (!SignBitOnly)
2695         return true;
2696       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2697                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
2698 
2699     case Intrinsic::powi:
2700       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2701         // powi(x,n) is non-negative if n is even.
2702         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2703           return true;
2704       }
2705       // TODO: This is not correct.  Given that exp is an integer, here are the
2706       // ways that pow can return a negative value:
2707       //
2708       //   pow(x, exp)    --> negative if exp is odd and x is negative.
2709       //   pow(-0, exp)   --> -inf if exp is negative odd.
2710       //   pow(-0, exp)   --> -0 if exp is positive odd.
2711       //   pow(-inf, exp) --> -0 if exp is negative odd.
2712       //   pow(-inf, exp) --> -inf if exp is positive odd.
2713       //
2714       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2715       // but we must return false if x == -0.  Unfortunately we do not currently
2716       // have a way of expressing this constraint.  See details in
2717       // https://llvm.org/bugs/show_bug.cgi?id=31702.
2718       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2719                                              Depth + 1);
2720 
2721     case Intrinsic::fma:
2722     case Intrinsic::fmuladd:
2723       // x*x+y is non-negative if y is non-negative.
2724       return I->getOperand(0) == I->getOperand(1) &&
2725              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2726              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2727                                              Depth + 1);
2728     }
2729     break;
2730   }
2731   return false;
2732 }
2733 
2734 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2735                                        const TargetLibraryInfo *TLI) {
2736   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2737 }
2738 
2739 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2740   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2741 }
2742 
2743 /// If the specified value can be set by repeating the same byte in memory,
2744 /// return the i8 value that it is represented with.  This is
2745 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2746 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2747 /// byte store (e.g. i16 0x1234), return null.
2748 Value *llvm::isBytewiseValue(Value *V) {
2749   // All byte-wide stores are splatable, even of arbitrary variables.
2750   if (V->getType()->isIntegerTy(8)) return V;
2751 
2752   // Handle 'null' ConstantArrayZero etc.
2753   if (Constant *C = dyn_cast<Constant>(V))
2754     if (C->isNullValue())
2755       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2756 
2757   // Constant float and double values can be handled as integer values if the
2758   // corresponding integer value is "byteable".  An important case is 0.0.
2759   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2760     if (CFP->getType()->isFloatTy())
2761       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2762     if (CFP->getType()->isDoubleTy())
2763       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2764     // Don't handle long double formats, which have strange constraints.
2765   }
2766 
2767   // We can handle constant integers that are multiple of 8 bits.
2768   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2769     if (CI->getBitWidth() % 8 == 0) {
2770       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2771 
2772       if (!CI->getValue().isSplat(8))
2773         return nullptr;
2774       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2775     }
2776   }
2777 
2778   // A ConstantDataArray/Vector is splatable if all its members are equal and
2779   // also splatable.
2780   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2781     Value *Elt = CA->getElementAsConstant(0);
2782     Value *Val = isBytewiseValue(Elt);
2783     if (!Val)
2784       return nullptr;
2785 
2786     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2787       if (CA->getElementAsConstant(I) != Elt)
2788         return nullptr;
2789 
2790     return Val;
2791   }
2792 
2793   // Conceptually, we could handle things like:
2794   //   %a = zext i8 %X to i16
2795   //   %b = shl i16 %a, 8
2796   //   %c = or i16 %a, %b
2797   // but until there is an example that actually needs this, it doesn't seem
2798   // worth worrying about.
2799   return nullptr;
2800 }
2801 
2802 
2803 // This is the recursive version of BuildSubAggregate. It takes a few different
2804 // arguments. Idxs is the index within the nested struct From that we are
2805 // looking at now (which is of type IndexedType). IdxSkip is the number of
2806 // indices from Idxs that should be left out when inserting into the resulting
2807 // struct. To is the result struct built so far, new insertvalue instructions
2808 // build on that.
2809 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2810                                 SmallVectorImpl<unsigned> &Idxs,
2811                                 unsigned IdxSkip,
2812                                 Instruction *InsertBefore) {
2813   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2814   if (STy) {
2815     // Save the original To argument so we can modify it
2816     Value *OrigTo = To;
2817     // General case, the type indexed by Idxs is a struct
2818     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2819       // Process each struct element recursively
2820       Idxs.push_back(i);
2821       Value *PrevTo = To;
2822       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2823                              InsertBefore);
2824       Idxs.pop_back();
2825       if (!To) {
2826         // Couldn't find any inserted value for this index? Cleanup
2827         while (PrevTo != OrigTo) {
2828           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2829           PrevTo = Del->getAggregateOperand();
2830           Del->eraseFromParent();
2831         }
2832         // Stop processing elements
2833         break;
2834       }
2835     }
2836     // If we successfully found a value for each of our subaggregates
2837     if (To)
2838       return To;
2839   }
2840   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2841   // the struct's elements had a value that was inserted directly. In the latter
2842   // case, perhaps we can't determine each of the subelements individually, but
2843   // we might be able to find the complete struct somewhere.
2844 
2845   // Find the value that is at that particular spot
2846   Value *V = FindInsertedValue(From, Idxs);
2847 
2848   if (!V)
2849     return nullptr;
2850 
2851   // Insert the value in the new (sub) aggregrate
2852   return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2853                                        "tmp", InsertBefore);
2854 }
2855 
2856 // This helper takes a nested struct and extracts a part of it (which is again a
2857 // struct) into a new value. For example, given the struct:
2858 // { a, { b, { c, d }, e } }
2859 // and the indices "1, 1" this returns
2860 // { c, d }.
2861 //
2862 // It does this by inserting an insertvalue for each element in the resulting
2863 // struct, as opposed to just inserting a single struct. This will only work if
2864 // each of the elements of the substruct are known (ie, inserted into From by an
2865 // insertvalue instruction somewhere).
2866 //
2867 // All inserted insertvalue instructions are inserted before InsertBefore
2868 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2869                                 Instruction *InsertBefore) {
2870   assert(InsertBefore && "Must have someplace to insert!");
2871   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2872                                                              idx_range);
2873   Value *To = UndefValue::get(IndexedType);
2874   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2875   unsigned IdxSkip = Idxs.size();
2876 
2877   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2878 }
2879 
2880 /// Given an aggregrate and an sequence of indices, see if
2881 /// the scalar value indexed is already around as a register, for example if it
2882 /// were inserted directly into the aggregrate.
2883 ///
2884 /// If InsertBefore is not null, this function will duplicate (modified)
2885 /// insertvalues when a part of a nested struct is extracted.
2886 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2887                                Instruction *InsertBefore) {
2888   // Nothing to index? Just return V then (this is useful at the end of our
2889   // recursion).
2890   if (idx_range.empty())
2891     return V;
2892   // We have indices, so V should have an indexable type.
2893   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2894          "Not looking at a struct or array?");
2895   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2896          "Invalid indices for type?");
2897 
2898   if (Constant *C = dyn_cast<Constant>(V)) {
2899     C = C->getAggregateElement(idx_range[0]);
2900     if (!C) return nullptr;
2901     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2902   }
2903 
2904   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2905     // Loop the indices for the insertvalue instruction in parallel with the
2906     // requested indices
2907     const unsigned *req_idx = idx_range.begin();
2908     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2909          i != e; ++i, ++req_idx) {
2910       if (req_idx == idx_range.end()) {
2911         // We can't handle this without inserting insertvalues
2912         if (!InsertBefore)
2913           return nullptr;
2914 
2915         // The requested index identifies a part of a nested aggregate. Handle
2916         // this specially. For example,
2917         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2918         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2919         // %C = extractvalue {i32, { i32, i32 } } %B, 1
2920         // This can be changed into
2921         // %A = insertvalue {i32, i32 } undef, i32 10, 0
2922         // %C = insertvalue {i32, i32 } %A, i32 11, 1
2923         // which allows the unused 0,0 element from the nested struct to be
2924         // removed.
2925         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2926                                  InsertBefore);
2927       }
2928 
2929       // This insert value inserts something else than what we are looking for.
2930       // See if the (aggregate) value inserted into has the value we are
2931       // looking for, then.
2932       if (*req_idx != *i)
2933         return FindInsertedValue(I->getAggregateOperand(), idx_range,
2934                                  InsertBefore);
2935     }
2936     // If we end up here, the indices of the insertvalue match with those
2937     // requested (though possibly only partially). Now we recursively look at
2938     // the inserted value, passing any remaining indices.
2939     return FindInsertedValue(I->getInsertedValueOperand(),
2940                              makeArrayRef(req_idx, idx_range.end()),
2941                              InsertBefore);
2942   }
2943 
2944   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2945     // If we're extracting a value from an aggregate that was extracted from
2946     // something else, we can extract from that something else directly instead.
2947     // However, we will need to chain I's indices with the requested indices.
2948 
2949     // Calculate the number of indices required
2950     unsigned size = I->getNumIndices() + idx_range.size();
2951     // Allocate some space to put the new indices in
2952     SmallVector<unsigned, 5> Idxs;
2953     Idxs.reserve(size);
2954     // Add indices from the extract value instruction
2955     Idxs.append(I->idx_begin(), I->idx_end());
2956 
2957     // Add requested indices
2958     Idxs.append(idx_range.begin(), idx_range.end());
2959 
2960     assert(Idxs.size() == size
2961            && "Number of indices added not correct?");
2962 
2963     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2964   }
2965   // Otherwise, we don't know (such as, extracting from a function return value
2966   // or load instruction)
2967   return nullptr;
2968 }
2969 
2970 /// Analyze the specified pointer to see if it can be expressed as a base
2971 /// pointer plus a constant offset. Return the base and offset to the caller.
2972 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2973                                               const DataLayout &DL) {
2974   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2975   APInt ByteOffset(BitWidth, 0);
2976 
2977   // We walk up the defs but use a visited set to handle unreachable code. In
2978   // that case, we stop after accumulating the cycle once (not that it
2979   // matters).
2980   SmallPtrSet<Value *, 16> Visited;
2981   while (Visited.insert(Ptr).second) {
2982     if (Ptr->getType()->isVectorTy())
2983       break;
2984 
2985     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2986       // If one of the values we have visited is an addrspacecast, then
2987       // the pointer type of this GEP may be different from the type
2988       // of the Ptr parameter which was passed to this function.  This
2989       // means when we construct GEPOffset, we need to use the size
2990       // of GEP's pointer type rather than the size of the original
2991       // pointer type.
2992       APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
2993       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2994         break;
2995 
2996       ByteOffset += GEPOffset.getSExtValue();
2997 
2998       Ptr = GEP->getPointerOperand();
2999     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3000                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3001       Ptr = cast<Operator>(Ptr)->getOperand(0);
3002     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3003       if (GA->isInterposable())
3004         break;
3005       Ptr = GA->getAliasee();
3006     } else {
3007       break;
3008     }
3009   }
3010   Offset = ByteOffset.getSExtValue();
3011   return Ptr;
3012 }
3013 
3014 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
3015   // Make sure the GEP has exactly three arguments.
3016   if (GEP->getNumOperands() != 3)
3017     return false;
3018 
3019   // Make sure the index-ee is a pointer to array of i8.
3020   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3021   if (!AT || !AT->getElementType()->isIntegerTy(8))
3022     return false;
3023 
3024   // Check to make sure that the first operand of the GEP is an integer and
3025   // has value 0 so that we are sure we're indexing into the initializer.
3026   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3027   if (!FirstIdx || !FirstIdx->isZero())
3028     return false;
3029 
3030   return true;
3031 }
3032 
3033 /// This function computes the length of a null-terminated C string pointed to
3034 /// by V. If successful, it returns true and returns the string in Str.
3035 /// If unsuccessful, it returns false.
3036 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3037                                  uint64_t Offset, bool TrimAtNul) {
3038   assert(V);
3039 
3040   // Look through bitcast instructions and geps.
3041   V = V->stripPointerCasts();
3042 
3043   // If the value is a GEP instruction or constant expression, treat it as an
3044   // offset.
3045   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3046     // The GEP operator should be based on a pointer to string constant, and is
3047     // indexing into the string constant.
3048     if (!isGEPBasedOnPointerToString(GEP))
3049       return false;
3050 
3051     // If the second index isn't a ConstantInt, then this is a variable index
3052     // into the array.  If this occurs, we can't say anything meaningful about
3053     // the string.
3054     uint64_t StartIdx = 0;
3055     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3056       StartIdx = CI->getZExtValue();
3057     else
3058       return false;
3059     return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
3060                                  TrimAtNul);
3061   }
3062 
3063   // The GEP instruction, constant or instruction, must reference a global
3064   // variable that is a constant and is initialized. The referenced constant
3065   // initializer is the array that we'll use for optimization.
3066   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3067   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3068     return false;
3069 
3070   // Handle the all-zeros case.
3071   if (GV->getInitializer()->isNullValue()) {
3072     // This is a degenerate case. The initializer is constant zero so the
3073     // length of the string must be zero.
3074     Str = "";
3075     return true;
3076   }
3077 
3078   // This must be a ConstantDataArray.
3079   const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3080   if (!Array || !Array->isString())
3081     return false;
3082 
3083   // Get the number of elements in the array.
3084   uint64_t NumElts = Array->getType()->getArrayNumElements();
3085 
3086   // Start out with the entire array in the StringRef.
3087   Str = Array->getAsString();
3088 
3089   if (Offset > NumElts)
3090     return false;
3091 
3092   // Skip over 'offset' bytes.
3093   Str = Str.substr(Offset);
3094 
3095   if (TrimAtNul) {
3096     // Trim off the \0 and anything after it.  If the array is not nul
3097     // terminated, we just return the whole end of string.  The client may know
3098     // some other way that the string is length-bound.
3099     Str = Str.substr(0, Str.find('\0'));
3100   }
3101   return true;
3102 }
3103 
3104 // These next two are very similar to the above, but also look through PHI
3105 // nodes.
3106 // TODO: See if we can integrate these two together.
3107 
3108 /// If we can compute the length of the string pointed to by
3109 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3110 static uint64_t GetStringLengthH(const Value *V,
3111                                  SmallPtrSetImpl<const PHINode*> &PHIs) {
3112   // Look through noop bitcast instructions.
3113   V = V->stripPointerCasts();
3114 
3115   // If this is a PHI node, there are two cases: either we have already seen it
3116   // or we haven't.
3117   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3118     if (!PHIs.insert(PN).second)
3119       return ~0ULL;  // already in the set.
3120 
3121     // If it was new, see if all the input strings are the same length.
3122     uint64_t LenSoFar = ~0ULL;
3123     for (Value *IncValue : PN->incoming_values()) {
3124       uint64_t Len = GetStringLengthH(IncValue, PHIs);
3125       if (Len == 0) return 0; // Unknown length -> unknown.
3126 
3127       if (Len == ~0ULL) continue;
3128 
3129       if (Len != LenSoFar && LenSoFar != ~0ULL)
3130         return 0;    // Disagree -> unknown.
3131       LenSoFar = Len;
3132     }
3133 
3134     // Success, all agree.
3135     return LenSoFar;
3136   }
3137 
3138   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3139   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3140     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
3141     if (Len1 == 0) return 0;
3142     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
3143     if (Len2 == 0) return 0;
3144     if (Len1 == ~0ULL) return Len2;
3145     if (Len2 == ~0ULL) return Len1;
3146     if (Len1 != Len2) return 0;
3147     return Len1;
3148   }
3149 
3150   // Otherwise, see if we can read the string.
3151   StringRef StrData;
3152   if (!getConstantStringInfo(V, StrData))
3153     return 0;
3154 
3155   return StrData.size()+1;
3156 }
3157 
3158 /// If we can compute the length of the string pointed to by
3159 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3160 uint64_t llvm::GetStringLength(const Value *V) {
3161   if (!V->getType()->isPointerTy()) return 0;
3162 
3163   SmallPtrSet<const PHINode*, 32> PHIs;
3164   uint64_t Len = GetStringLengthH(V, PHIs);
3165   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3166   // an empty string as a length.
3167   return Len == ~0ULL ? 1 : Len;
3168 }
3169 
3170 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
3171 /// previous iteration of the loop was referring to the same object as \p PN.
3172 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3173                                          const LoopInfo *LI) {
3174   // Find the loop-defined value.
3175   Loop *L = LI->getLoopFor(PN->getParent());
3176   if (PN->getNumIncomingValues() != 2)
3177     return true;
3178 
3179   // Find the value from previous iteration.
3180   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3181   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3182     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3183   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3184     return true;
3185 
3186   // If a new pointer is loaded in the loop, the pointer references a different
3187   // object in every iteration.  E.g.:
3188   //    for (i)
3189   //       int *p = a[i];
3190   //       ...
3191   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3192     if (!L->isLoopInvariant(Load->getPointerOperand()))
3193       return false;
3194   return true;
3195 }
3196 
3197 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3198                                  unsigned MaxLookup) {
3199   if (!V->getType()->isPointerTy())
3200     return V;
3201   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3202     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3203       V = GEP->getPointerOperand();
3204     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3205                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3206       V = cast<Operator>(V)->getOperand(0);
3207     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3208       if (GA->isInterposable())
3209         return V;
3210       V = GA->getAliasee();
3211     } else if (isa<AllocaInst>(V)) {
3212       // An alloca can't be further simplified.
3213       return V;
3214     } else {
3215       if (auto CS = CallSite(V))
3216         if (Value *RV = CS.getReturnedArgOperand()) {
3217           V = RV;
3218           continue;
3219         }
3220 
3221       // See if InstructionSimplify knows any relevant tricks.
3222       if (Instruction *I = dyn_cast<Instruction>(V))
3223         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3224         if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
3225           V = Simplified;
3226           continue;
3227         }
3228 
3229       return V;
3230     }
3231     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3232   }
3233   return V;
3234 }
3235 
3236 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3237                                 const DataLayout &DL, LoopInfo *LI,
3238                                 unsigned MaxLookup) {
3239   SmallPtrSet<Value *, 4> Visited;
3240   SmallVector<Value *, 4> Worklist;
3241   Worklist.push_back(V);
3242   do {
3243     Value *P = Worklist.pop_back_val();
3244     P = GetUnderlyingObject(P, DL, MaxLookup);
3245 
3246     if (!Visited.insert(P).second)
3247       continue;
3248 
3249     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3250       Worklist.push_back(SI->getTrueValue());
3251       Worklist.push_back(SI->getFalseValue());
3252       continue;
3253     }
3254 
3255     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3256       // If this PHI changes the underlying object in every iteration of the
3257       // loop, don't look through it.  Consider:
3258       //   int **A;
3259       //   for (i) {
3260       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3261       //     Curr = A[i];
3262       //     *Prev, *Curr;
3263       //
3264       // Prev is tracking Curr one iteration behind so they refer to different
3265       // underlying objects.
3266       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3267           isSameUnderlyingObjectInLoop(PN, LI))
3268         for (Value *IncValue : PN->incoming_values())
3269           Worklist.push_back(IncValue);
3270       continue;
3271     }
3272 
3273     Objects.push_back(P);
3274   } while (!Worklist.empty());
3275 }
3276 
3277 /// Return true if the only users of this pointer are lifetime markers.
3278 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3279   for (const User *U : V->users()) {
3280     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3281     if (!II) return false;
3282 
3283     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3284         II->getIntrinsicID() != Intrinsic::lifetime_end)
3285       return false;
3286   }
3287   return true;
3288 }
3289 
3290 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3291                                         const Instruction *CtxI,
3292                                         const DominatorTree *DT) {
3293   const Operator *Inst = dyn_cast<Operator>(V);
3294   if (!Inst)
3295     return false;
3296 
3297   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3298     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3299       if (C->canTrap())
3300         return false;
3301 
3302   switch (Inst->getOpcode()) {
3303   default:
3304     return true;
3305   case Instruction::UDiv:
3306   case Instruction::URem: {
3307     // x / y is undefined if y == 0.
3308     const APInt *V;
3309     if (match(Inst->getOperand(1), m_APInt(V)))
3310       return *V != 0;
3311     return false;
3312   }
3313   case Instruction::SDiv:
3314   case Instruction::SRem: {
3315     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3316     const APInt *Numerator, *Denominator;
3317     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3318       return false;
3319     // We cannot hoist this division if the denominator is 0.
3320     if (*Denominator == 0)
3321       return false;
3322     // It's safe to hoist if the denominator is not 0 or -1.
3323     if (*Denominator != -1)
3324       return true;
3325     // At this point we know that the denominator is -1.  It is safe to hoist as
3326     // long we know that the numerator is not INT_MIN.
3327     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3328       return !Numerator->isMinSignedValue();
3329     // The numerator *might* be MinSignedValue.
3330     return false;
3331   }
3332   case Instruction::Load: {
3333     const LoadInst *LI = cast<LoadInst>(Inst);
3334     if (!LI->isUnordered() ||
3335         // Speculative load may create a race that did not exist in the source.
3336         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3337         // Speculative load may load data from dirty regions.
3338         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
3339       return false;
3340     const DataLayout &DL = LI->getModule()->getDataLayout();
3341     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3342                                               LI->getAlignment(), DL, CtxI, DT);
3343   }
3344   case Instruction::Call: {
3345     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
3346       switch (II->getIntrinsicID()) {
3347       // These synthetic intrinsics have no side-effects and just mark
3348       // information about their operands.
3349       // FIXME: There are other no-op synthetic instructions that potentially
3350       // should be considered at least *safe* to speculate...
3351       case Intrinsic::dbg_declare:
3352       case Intrinsic::dbg_value:
3353         return true;
3354 
3355       case Intrinsic::bitreverse:
3356       case Intrinsic::bswap:
3357       case Intrinsic::ctlz:
3358       case Intrinsic::ctpop:
3359       case Intrinsic::cttz:
3360       case Intrinsic::objectsize:
3361       case Intrinsic::sadd_with_overflow:
3362       case Intrinsic::smul_with_overflow:
3363       case Intrinsic::ssub_with_overflow:
3364       case Intrinsic::uadd_with_overflow:
3365       case Intrinsic::umul_with_overflow:
3366       case Intrinsic::usub_with_overflow:
3367         return true;
3368       // These intrinsics are defined to have the same behavior as libm
3369       // functions except for setting errno.
3370       case Intrinsic::sqrt:
3371       case Intrinsic::fma:
3372       case Intrinsic::fmuladd:
3373         return true;
3374       // These intrinsics are defined to have the same behavior as libm
3375       // functions, and the corresponding libm functions never set errno.
3376       case Intrinsic::trunc:
3377       case Intrinsic::copysign:
3378       case Intrinsic::fabs:
3379       case Intrinsic::minnum:
3380       case Intrinsic::maxnum:
3381         return true;
3382       // These intrinsics are defined to have the same behavior as libm
3383       // functions, which never overflow when operating on the IEEE754 types
3384       // that we support, and never set errno otherwise.
3385       case Intrinsic::ceil:
3386       case Intrinsic::floor:
3387       case Intrinsic::nearbyint:
3388       case Intrinsic::rint:
3389       case Intrinsic::round:
3390         return true;
3391       // These intrinsics do not correspond to any libm function, and
3392       // do not set errno.
3393       case Intrinsic::powi:
3394         return true;
3395       // TODO: are convert_{from,to}_fp16 safe?
3396       // TODO: can we list target-specific intrinsics here?
3397       default: break;
3398       }
3399     }
3400     return false; // The called function could have undefined behavior or
3401                   // side-effects, even if marked readnone nounwind.
3402   }
3403   case Instruction::VAArg:
3404   case Instruction::Alloca:
3405   case Instruction::Invoke:
3406   case Instruction::PHI:
3407   case Instruction::Store:
3408   case Instruction::Ret:
3409   case Instruction::Br:
3410   case Instruction::IndirectBr:
3411   case Instruction::Switch:
3412   case Instruction::Unreachable:
3413   case Instruction::Fence:
3414   case Instruction::AtomicRMW:
3415   case Instruction::AtomicCmpXchg:
3416   case Instruction::LandingPad:
3417   case Instruction::Resume:
3418   case Instruction::CatchSwitch:
3419   case Instruction::CatchPad:
3420   case Instruction::CatchRet:
3421   case Instruction::CleanupPad:
3422   case Instruction::CleanupRet:
3423     return false; // Misc instructions which have effects
3424   }
3425 }
3426 
3427 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3428   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3429 }
3430 
3431 /// Return true if we know that the specified value is never null.
3432 bool llvm::isKnownNonNull(const Value *V) {
3433   assert(V->getType()->isPointerTy() && "V must be pointer type");
3434 
3435   // Alloca never returns null, malloc might.
3436   if (isa<AllocaInst>(V)) return true;
3437 
3438   // A byval, inalloca, or nonnull argument is never null.
3439   if (const Argument *A = dyn_cast<Argument>(V))
3440     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3441 
3442   // A global variable in address space 0 is non null unless extern weak
3443   // or an absolute symbol reference. Other address spaces may have null as a
3444   // valid address for a global, so we can't assume anything.
3445   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3446     return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3447            GV->getType()->getAddressSpace() == 0;
3448 
3449   // A Load tagged with nonnull metadata is never null.
3450   if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3451     return LI->getMetadata(LLVMContext::MD_nonnull);
3452 
3453   if (auto CS = ImmutableCallSite(V))
3454     if (CS.isReturnNonNull())
3455       return true;
3456 
3457   return false;
3458 }
3459 
3460 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3461                                                   const Instruction *CtxI,
3462                                                   const DominatorTree *DT) {
3463   assert(V->getType()->isPointerTy() && "V must be pointer type");
3464   assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
3465   assert(CtxI && "Context instruction required for analysis");
3466   assert(DT && "Dominator tree required for analysis");
3467 
3468   unsigned NumUsesExplored = 0;
3469   for (auto *U : V->users()) {
3470     // Avoid massive lists
3471     if (NumUsesExplored >= DomConditionsMaxUses)
3472       break;
3473     NumUsesExplored++;
3474 
3475     // If the value is used as an argument to a call or invoke, then argument
3476     // attributes may provide an answer about null-ness.
3477     if (auto CS = ImmutableCallSite(U))
3478       if (auto *CalledFunc = CS.getCalledFunction())
3479         for (const Argument &Arg : CalledFunc->args())
3480           if (CS.getArgOperand(Arg.getArgNo()) == V &&
3481               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
3482             return true;
3483 
3484     // Consider only compare instructions uniquely controlling a branch
3485     CmpInst::Predicate Pred;
3486     if (!match(const_cast<User *>(U),
3487                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
3488         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
3489       continue;
3490 
3491     for (auto *CmpU : U->users()) {
3492       if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
3493         assert(BI->isConditional() && "uses a comparison!");
3494 
3495         BasicBlock *NonNullSuccessor =
3496             BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
3497         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3498         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3499           return true;
3500       } else if (Pred == ICmpInst::ICMP_NE &&
3501                  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
3502                  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
3503         return true;
3504       }
3505     }
3506   }
3507 
3508   return false;
3509 }
3510 
3511 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3512                             const DominatorTree *DT) {
3513   if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
3514     return false;
3515 
3516   if (isKnownNonNull(V))
3517     return true;
3518 
3519   if (!CtxI || !DT)
3520     return false;
3521 
3522   return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
3523 }
3524 
3525 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3526                                                    const Value *RHS,
3527                                                    const DataLayout &DL,
3528                                                    AssumptionCache *AC,
3529                                                    const Instruction *CxtI,
3530                                                    const DominatorTree *DT) {
3531   // Multiplying n * m significant bits yields a result of n + m significant
3532   // bits. If the total number of significant bits does not exceed the
3533   // result bit width (minus 1), there is no overflow.
3534   // This means if we have enough leading zero bits in the operands
3535   // we can guarantee that the result does not overflow.
3536   // Ref: "Hacker's Delight" by Henry Warren
3537   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3538   APInt LHSKnownZero(BitWidth, 0);
3539   APInt LHSKnownOne(BitWidth, 0);
3540   APInt RHSKnownZero(BitWidth, 0);
3541   APInt RHSKnownOne(BitWidth, 0);
3542   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3543                    DT);
3544   computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3545                    DT);
3546   // Note that underestimating the number of zero bits gives a more
3547   // conservative answer.
3548   unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3549                       RHSKnownZero.countLeadingOnes();
3550   // First handle the easy case: if we have enough zero bits there's
3551   // definitely no overflow.
3552   if (ZeroBits >= BitWidth)
3553     return OverflowResult::NeverOverflows;
3554 
3555   // Get the largest possible values for each operand.
3556   APInt LHSMax = ~LHSKnownZero;
3557   APInt RHSMax = ~RHSKnownZero;
3558 
3559   // We know the multiply operation doesn't overflow if the maximum values for
3560   // each operand will not overflow after we multiply them together.
3561   bool MaxOverflow;
3562   (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3563   if (!MaxOverflow)
3564     return OverflowResult::NeverOverflows;
3565 
3566   // We know it always overflows if multiplying the smallest possible values for
3567   // the operands also results in overflow.
3568   bool MinOverflow;
3569   (void)LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3570   if (MinOverflow)
3571     return OverflowResult::AlwaysOverflows;
3572 
3573   return OverflowResult::MayOverflow;
3574 }
3575 
3576 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3577                                                    const Value *RHS,
3578                                                    const DataLayout &DL,
3579                                                    AssumptionCache *AC,
3580                                                    const Instruction *CxtI,
3581                                                    const DominatorTree *DT) {
3582   bool LHSKnownNonNegative, LHSKnownNegative;
3583   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3584                  AC, CxtI, DT);
3585   if (LHSKnownNonNegative || LHSKnownNegative) {
3586     bool RHSKnownNonNegative, RHSKnownNegative;
3587     ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3588                    AC, CxtI, DT);
3589 
3590     if (LHSKnownNegative && RHSKnownNegative) {
3591       // The sign bit is set in both cases: this MUST overflow.
3592       // Create a simple add instruction, and insert it into the struct.
3593       return OverflowResult::AlwaysOverflows;
3594     }
3595 
3596     if (LHSKnownNonNegative && RHSKnownNonNegative) {
3597       // The sign bit is clear in both cases: this CANNOT overflow.
3598       // Create a simple add instruction, and insert it into the struct.
3599       return OverflowResult::NeverOverflows;
3600     }
3601   }
3602 
3603   return OverflowResult::MayOverflow;
3604 }
3605 
3606 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3607                                                   const Value *RHS,
3608                                                   const AddOperator *Add,
3609                                                   const DataLayout &DL,
3610                                                   AssumptionCache *AC,
3611                                                   const Instruction *CxtI,
3612                                                   const DominatorTree *DT) {
3613   if (Add && Add->hasNoSignedWrap()) {
3614     return OverflowResult::NeverOverflows;
3615   }
3616 
3617   bool LHSKnownNonNegative, LHSKnownNegative;
3618   bool RHSKnownNonNegative, RHSKnownNegative;
3619   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3620                  AC, CxtI, DT);
3621   ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3622                  AC, CxtI, DT);
3623 
3624   if ((LHSKnownNonNegative && RHSKnownNegative) ||
3625       (LHSKnownNegative && RHSKnownNonNegative)) {
3626     // The sign bits are opposite: this CANNOT overflow.
3627     return OverflowResult::NeverOverflows;
3628   }
3629 
3630   // The remaining code needs Add to be available. Early returns if not so.
3631   if (!Add)
3632     return OverflowResult::MayOverflow;
3633 
3634   // If the sign of Add is the same as at least one of the operands, this add
3635   // CANNOT overflow. This is particularly useful when the sum is
3636   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3637   // operands.
3638   bool LHSOrRHSKnownNonNegative =
3639       (LHSKnownNonNegative || RHSKnownNonNegative);
3640   bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3641   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3642     bool AddKnownNonNegative, AddKnownNegative;
3643     ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3644                    /*Depth=*/0, AC, CxtI, DT);
3645     if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3646         (AddKnownNegative && LHSOrRHSKnownNegative)) {
3647       return OverflowResult::NeverOverflows;
3648     }
3649   }
3650 
3651   return OverflowResult::MayOverflow;
3652 }
3653 
3654 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3655                                      const DominatorTree &DT) {
3656 #ifndef NDEBUG
3657   auto IID = II->getIntrinsicID();
3658   assert((IID == Intrinsic::sadd_with_overflow ||
3659           IID == Intrinsic::uadd_with_overflow ||
3660           IID == Intrinsic::ssub_with_overflow ||
3661           IID == Intrinsic::usub_with_overflow ||
3662           IID == Intrinsic::smul_with_overflow ||
3663           IID == Intrinsic::umul_with_overflow) &&
3664          "Not an overflow intrinsic!");
3665 #endif
3666 
3667   SmallVector<const BranchInst *, 2> GuardingBranches;
3668   SmallVector<const ExtractValueInst *, 2> Results;
3669 
3670   for (const User *U : II->users()) {
3671     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3672       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3673 
3674       if (EVI->getIndices()[0] == 0)
3675         Results.push_back(EVI);
3676       else {
3677         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3678 
3679         for (const auto *U : EVI->users())
3680           if (const auto *B = dyn_cast<BranchInst>(U)) {
3681             assert(B->isConditional() && "How else is it using an i1?");
3682             GuardingBranches.push_back(B);
3683           }
3684       }
3685     } else {
3686       // We are using the aggregate directly in a way we don't want to analyze
3687       // here (storing it to a global, say).
3688       return false;
3689     }
3690   }
3691 
3692   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3693     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3694     if (!NoWrapEdge.isSingleEdge())
3695       return false;
3696 
3697     // Check if all users of the add are provably no-wrap.
3698     for (const auto *Result : Results) {
3699       // If the extractvalue itself is not executed on overflow, the we don't
3700       // need to check each use separately, since domination is transitive.
3701       if (DT.dominates(NoWrapEdge, Result->getParent()))
3702         continue;
3703 
3704       for (auto &RU : Result->uses())
3705         if (!DT.dominates(NoWrapEdge, RU))
3706           return false;
3707     }
3708 
3709     return true;
3710   };
3711 
3712   return any_of(GuardingBranches, AllUsesGuardedByBranch);
3713 }
3714 
3715 
3716 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3717                                                  const DataLayout &DL,
3718                                                  AssumptionCache *AC,
3719                                                  const Instruction *CxtI,
3720                                                  const DominatorTree *DT) {
3721   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3722                                        Add, DL, AC, CxtI, DT);
3723 }
3724 
3725 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3726                                                  const Value *RHS,
3727                                                  const DataLayout &DL,
3728                                                  AssumptionCache *AC,
3729                                                  const Instruction *CxtI,
3730                                                  const DominatorTree *DT) {
3731   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3732 }
3733 
3734 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3735   // A memory operation returns normally if it isn't volatile. A volatile
3736   // operation is allowed to trap.
3737   //
3738   // An atomic operation isn't guaranteed to return in a reasonable amount of
3739   // time because it's possible for another thread to interfere with it for an
3740   // arbitrary length of time, but programs aren't allowed to rely on that.
3741   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3742     return !LI->isVolatile();
3743   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3744     return !SI->isVolatile();
3745   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3746     return !CXI->isVolatile();
3747   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3748     return !RMWI->isVolatile();
3749   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3750     return !MII->isVolatile();
3751 
3752   // If there is no successor, then execution can't transfer to it.
3753   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3754     return !CRI->unwindsToCaller();
3755   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3756     return !CatchSwitch->unwindsToCaller();
3757   if (isa<ResumeInst>(I))
3758     return false;
3759   if (isa<ReturnInst>(I))
3760     return false;
3761   if (isa<UnreachableInst>(I))
3762     return false;
3763 
3764   // Calls can throw, or contain an infinite loop, or kill the process.
3765   if (auto CS = ImmutableCallSite(I)) {
3766     // Call sites that throw have implicit non-local control flow.
3767     if (!CS.doesNotThrow())
3768       return false;
3769 
3770     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3771     // etc. and thus not return.  However, LLVM already assumes that
3772     //
3773     //  - Thread exiting actions are modeled as writes to memory invisible to
3774     //    the program.
3775     //
3776     //  - Loops that don't have side effects (side effects are volatile/atomic
3777     //    stores and IO) always terminate (see http://llvm.org/PR965).
3778     //    Furthermore IO itself is also modeled as writes to memory invisible to
3779     //    the program.
3780     //
3781     // We rely on those assumptions here, and use the memory effects of the call
3782     // target as a proxy for checking that it always returns.
3783 
3784     // FIXME: This isn't aggressive enough; a call which only writes to a global
3785     // is guaranteed to return.
3786     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3787            match(I, m_Intrinsic<Intrinsic::assume>());
3788   }
3789 
3790   // Other instructions return normally.
3791   return true;
3792 }
3793 
3794 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3795                                                   const Loop *L) {
3796   // The loop header is guaranteed to be executed for every iteration.
3797   //
3798   // FIXME: Relax this constraint to cover all basic blocks that are
3799   // guaranteed to be executed at every iteration.
3800   if (I->getParent() != L->getHeader()) return false;
3801 
3802   for (const Instruction &LI : *L->getHeader()) {
3803     if (&LI == I) return true;
3804     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3805   }
3806   llvm_unreachable("Instruction not contained in its own parent basic block.");
3807 }
3808 
3809 bool llvm::propagatesFullPoison(const Instruction *I) {
3810   switch (I->getOpcode()) {
3811   case Instruction::Add:
3812   case Instruction::Sub:
3813   case Instruction::Xor:
3814   case Instruction::Trunc:
3815   case Instruction::BitCast:
3816   case Instruction::AddrSpaceCast:
3817   case Instruction::Mul:
3818   case Instruction::Shl:
3819   case Instruction::GetElementPtr:
3820     // These operations all propagate poison unconditionally. Note that poison
3821     // is not any particular value, so xor or subtraction of poison with
3822     // itself still yields poison, not zero.
3823     return true;
3824 
3825   case Instruction::AShr:
3826   case Instruction::SExt:
3827     // For these operations, one bit of the input is replicated across
3828     // multiple output bits. A replicated poison bit is still poison.
3829     return true;
3830 
3831   case Instruction::ICmp:
3832     // Comparing poison with any value yields poison.  This is why, for
3833     // instance, x s< (x +nsw 1) can be folded to true.
3834     return true;
3835 
3836   default:
3837     return false;
3838   }
3839 }
3840 
3841 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3842   switch (I->getOpcode()) {
3843     case Instruction::Store:
3844       return cast<StoreInst>(I)->getPointerOperand();
3845 
3846     case Instruction::Load:
3847       return cast<LoadInst>(I)->getPointerOperand();
3848 
3849     case Instruction::AtomicCmpXchg:
3850       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3851 
3852     case Instruction::AtomicRMW:
3853       return cast<AtomicRMWInst>(I)->getPointerOperand();
3854 
3855     case Instruction::UDiv:
3856     case Instruction::SDiv:
3857     case Instruction::URem:
3858     case Instruction::SRem:
3859       return I->getOperand(1);
3860 
3861     default:
3862       return nullptr;
3863   }
3864 }
3865 
3866 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
3867   // We currently only look for uses of poison values within the same basic
3868   // block, as that makes it easier to guarantee that the uses will be
3869   // executed given that PoisonI is executed.
3870   //
3871   // FIXME: Expand this to consider uses beyond the same basic block. To do
3872   // this, look out for the distinction between post-dominance and strong
3873   // post-dominance.
3874   const BasicBlock *BB = PoisonI->getParent();
3875 
3876   // Set of instructions that we have proved will yield poison if PoisonI
3877   // does.
3878   SmallSet<const Value *, 16> YieldsPoison;
3879   SmallSet<const BasicBlock *, 4> Visited;
3880   YieldsPoison.insert(PoisonI);
3881   Visited.insert(PoisonI->getParent());
3882 
3883   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
3884 
3885   unsigned Iter = 0;
3886   while (Iter++ < MaxDepth) {
3887     for (auto &I : make_range(Begin, End)) {
3888       if (&I != PoisonI) {
3889         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
3890         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
3891           return true;
3892         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
3893           return false;
3894       }
3895 
3896       // Mark poison that propagates from I through uses of I.
3897       if (YieldsPoison.count(&I)) {
3898         for (const User *User : I.users()) {
3899           const Instruction *UserI = cast<Instruction>(User);
3900           if (propagatesFullPoison(UserI))
3901             YieldsPoison.insert(User);
3902         }
3903       }
3904     }
3905 
3906     if (auto *NextBB = BB->getSingleSuccessor()) {
3907       if (Visited.insert(NextBB).second) {
3908         BB = NextBB;
3909         Begin = BB->getFirstNonPHI()->getIterator();
3910         End = BB->end();
3911         continue;
3912       }
3913     }
3914 
3915     break;
3916   };
3917   return false;
3918 }
3919 
3920 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
3921   if (FMF.noNaNs())
3922     return true;
3923 
3924   if (auto *C = dyn_cast<ConstantFP>(V))
3925     return !C->isNaN();
3926   return false;
3927 }
3928 
3929 static bool isKnownNonZero(const Value *V) {
3930   if (auto *C = dyn_cast<ConstantFP>(V))
3931     return !C->isZero();
3932   return false;
3933 }
3934 
3935 /// Match non-obvious integer minimum and maximum sequences.
3936 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
3937                                        Value *CmpLHS, Value *CmpRHS,
3938                                        Value *TrueVal, Value *FalseVal,
3939                                        Value *&LHS, Value *&RHS) {
3940   // Assume success. If there's no match, callers should not use these anyway.
3941   LHS = TrueVal;
3942   RHS = FalseVal;
3943 
3944   // Recognize variations of:
3945   // CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
3946   const APInt *C1;
3947   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
3948     const APInt *C2;
3949 
3950     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
3951     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
3952         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
3953       return {SPF_SMAX, SPNB_NA, false};
3954 
3955     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
3956     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
3957         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
3958       return {SPF_SMIN, SPNB_NA, false};
3959 
3960     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
3961     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
3962         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
3963       return {SPF_UMAX, SPNB_NA, false};
3964 
3965     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
3966     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
3967         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
3968       return {SPF_UMIN, SPNB_NA, false};
3969   }
3970 
3971   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
3972     return {SPF_UNKNOWN, SPNB_NA, false};
3973 
3974   // Z = X -nsw Y
3975   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
3976   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
3977   if (match(TrueVal, m_Zero()) &&
3978       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
3979     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
3980 
3981   // Z = X -nsw Y
3982   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
3983   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
3984   if (match(FalseVal, m_Zero()) &&
3985       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
3986     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
3987 
3988   if (!match(CmpRHS, m_APInt(C1)))
3989     return {SPF_UNKNOWN, SPNB_NA, false};
3990 
3991   // An unsigned min/max can be written with a signed compare.
3992   const APInt *C2;
3993   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
3994       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
3995     // Is the sign bit set?
3996     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
3997     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
3998     if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue())
3999       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4000 
4001     // Is the sign bit clear?
4002     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4003     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4004     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4005         C2->isMinSignedValue())
4006       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4007   }
4008 
4009   // Look through 'not' ops to find disguised signed min/max.
4010   // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4011   // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4012   if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4013       match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4014     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4015 
4016   // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4017   // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4018   if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4019       match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4020     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4021 
4022   return {SPF_UNKNOWN, SPNB_NA, false};
4023 }
4024 
4025 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4026                                               FastMathFlags FMF,
4027                                               Value *CmpLHS, Value *CmpRHS,
4028                                               Value *TrueVal, Value *FalseVal,
4029                                               Value *&LHS, Value *&RHS) {
4030   LHS = CmpLHS;
4031   RHS = CmpRHS;
4032 
4033   // If the predicate is an "or-equal"  (FP) predicate, then signed zeroes may
4034   // return inconsistent results between implementations.
4035   //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4036   //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4037   // Therefore we behave conservatively and only proceed if at least one of the
4038   // operands is known to not be zero, or if we don't care about signed zeroes.
4039   switch (Pred) {
4040   default: break;
4041   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4042   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4043     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4044         !isKnownNonZero(CmpRHS))
4045       return {SPF_UNKNOWN, SPNB_NA, false};
4046   }
4047 
4048   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4049   bool Ordered = false;
4050 
4051   // When given one NaN and one non-NaN input:
4052   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4053   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4054   //     ordered comparison fails), which could be NaN or non-NaN.
4055   // so here we discover exactly what NaN behavior is required/accepted.
4056   if (CmpInst::isFPPredicate(Pred)) {
4057     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4058     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4059 
4060     if (LHSSafe && RHSSafe) {
4061       // Both operands are known non-NaN.
4062       NaNBehavior = SPNB_RETURNS_ANY;
4063     } else if (CmpInst::isOrdered(Pred)) {
4064       // An ordered comparison will return false when given a NaN, so it
4065       // returns the RHS.
4066       Ordered = true;
4067       if (LHSSafe)
4068         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4069         NaNBehavior = SPNB_RETURNS_NAN;
4070       else if (RHSSafe)
4071         NaNBehavior = SPNB_RETURNS_OTHER;
4072       else
4073         // Completely unsafe.
4074         return {SPF_UNKNOWN, SPNB_NA, false};
4075     } else {
4076       Ordered = false;
4077       // An unordered comparison will return true when given a NaN, so it
4078       // returns the LHS.
4079       if (LHSSafe)
4080         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4081         NaNBehavior = SPNB_RETURNS_OTHER;
4082       else if (RHSSafe)
4083         NaNBehavior = SPNB_RETURNS_NAN;
4084       else
4085         // Completely unsafe.
4086         return {SPF_UNKNOWN, SPNB_NA, false};
4087     }
4088   }
4089 
4090   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4091     std::swap(CmpLHS, CmpRHS);
4092     Pred = CmpInst::getSwappedPredicate(Pred);
4093     if (NaNBehavior == SPNB_RETURNS_NAN)
4094       NaNBehavior = SPNB_RETURNS_OTHER;
4095     else if (NaNBehavior == SPNB_RETURNS_OTHER)
4096       NaNBehavior = SPNB_RETURNS_NAN;
4097     Ordered = !Ordered;
4098   }
4099 
4100   // ([if]cmp X, Y) ? X : Y
4101   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4102     switch (Pred) {
4103     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4104     case ICmpInst::ICMP_UGT:
4105     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4106     case ICmpInst::ICMP_SGT:
4107     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4108     case ICmpInst::ICMP_ULT:
4109     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4110     case ICmpInst::ICMP_SLT:
4111     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4112     case FCmpInst::FCMP_UGT:
4113     case FCmpInst::FCMP_UGE:
4114     case FCmpInst::FCMP_OGT:
4115     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4116     case FCmpInst::FCMP_ULT:
4117     case FCmpInst::FCMP_ULE:
4118     case FCmpInst::FCMP_OLT:
4119     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4120     }
4121   }
4122 
4123   const APInt *C1;
4124   if (match(CmpRHS, m_APInt(C1))) {
4125     if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4126         (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4127 
4128       // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4129       // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4130       if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) {
4131         return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4132       }
4133 
4134       // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4135       // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4136       if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) {
4137         return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4138       }
4139     }
4140   }
4141 
4142   return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4143 }
4144 
4145 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4146                               Instruction::CastOps *CastOp) {
4147   auto *Cast1 = dyn_cast<CastInst>(V1);
4148   if (!Cast1)
4149     return nullptr;
4150 
4151   *CastOp = Cast1->getOpcode();
4152   Type *SrcTy = Cast1->getSrcTy();
4153   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4154     // If V1 and V2 are both the same cast from the same type, look through V1.
4155     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4156       return Cast2->getOperand(0);
4157     return nullptr;
4158   }
4159 
4160   auto *C = dyn_cast<Constant>(V2);
4161   if (!C)
4162     return nullptr;
4163 
4164   Constant *CastedTo = nullptr;
4165   switch (*CastOp) {
4166   case Instruction::ZExt:
4167     if (CmpI->isUnsigned())
4168       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4169     break;
4170   case Instruction::SExt:
4171     if (CmpI->isSigned())
4172       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4173     break;
4174   case Instruction::Trunc:
4175     CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4176     break;
4177   case Instruction::FPTrunc:
4178     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4179     break;
4180   case Instruction::FPExt:
4181     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4182     break;
4183   case Instruction::FPToUI:
4184     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4185     break;
4186   case Instruction::FPToSI:
4187     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4188     break;
4189   case Instruction::UIToFP:
4190     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4191     break;
4192   case Instruction::SIToFP:
4193     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4194     break;
4195   default:
4196     break;
4197   }
4198 
4199   if (!CastedTo)
4200     return nullptr;
4201 
4202   // Make sure the cast doesn't lose any information.
4203   Constant *CastedBack =
4204       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4205   if (CastedBack != C)
4206     return nullptr;
4207 
4208   return CastedTo;
4209 }
4210 
4211 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4212                                              Instruction::CastOps *CastOp) {
4213   SelectInst *SI = dyn_cast<SelectInst>(V);
4214   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4215 
4216   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4217   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4218 
4219   CmpInst::Predicate Pred = CmpI->getPredicate();
4220   Value *CmpLHS = CmpI->getOperand(0);
4221   Value *CmpRHS = CmpI->getOperand(1);
4222   Value *TrueVal = SI->getTrueValue();
4223   Value *FalseVal = SI->getFalseValue();
4224   FastMathFlags FMF;
4225   if (isa<FPMathOperator>(CmpI))
4226     FMF = CmpI->getFastMathFlags();
4227 
4228   // Bail out early.
4229   if (CmpI->isEquality())
4230     return {SPF_UNKNOWN, SPNB_NA, false};
4231 
4232   // Deal with type mismatches.
4233   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4234     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
4235       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4236                                   cast<CastInst>(TrueVal)->getOperand(0), C,
4237                                   LHS, RHS);
4238     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
4239       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4240                                   C, cast<CastInst>(FalseVal)->getOperand(0),
4241                                   LHS, RHS);
4242   }
4243   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4244                               LHS, RHS);
4245 }
4246 
4247 /// Return true if "icmp Pred LHS RHS" is always true.
4248 static bool isTruePredicate(CmpInst::Predicate Pred,
4249                             const Value *LHS, const Value *RHS,
4250                             const DataLayout &DL, unsigned Depth,
4251                             AssumptionCache *AC, const Instruction *CxtI,
4252                             const DominatorTree *DT) {
4253   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4254   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4255     return true;
4256 
4257   switch (Pred) {
4258   default:
4259     return false;
4260 
4261   case CmpInst::ICMP_SLE: {
4262     const APInt *C;
4263 
4264     // LHS s<= LHS +_{nsw} C   if C >= 0
4265     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4266       return !C->isNegative();
4267     return false;
4268   }
4269 
4270   case CmpInst::ICMP_ULE: {
4271     const APInt *C;
4272 
4273     // LHS u<= LHS +_{nuw} C   for any C
4274     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4275       return true;
4276 
4277     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4278     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4279                                        const Value *&X,
4280                                        const APInt *&CA, const APInt *&CB) {
4281       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4282           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4283         return true;
4284 
4285       // If X & C == 0 then (X | C) == X +_{nuw} C
4286       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4287           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4288         unsigned BitWidth = CA->getBitWidth();
4289         APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
4290         computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
4291 
4292         if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
4293           return true;
4294       }
4295 
4296       return false;
4297     };
4298 
4299     const Value *X;
4300     const APInt *CLHS, *CRHS;
4301     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4302       return CLHS->ule(*CRHS);
4303 
4304     return false;
4305   }
4306   }
4307 }
4308 
4309 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4310 /// ALHS ARHS" is true.  Otherwise, return None.
4311 static Optional<bool>
4312 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4313                       const Value *ARHS, const Value *BLHS,
4314                       const Value *BRHS, const DataLayout &DL,
4315                       unsigned Depth, AssumptionCache *AC,
4316                       const Instruction *CxtI, const DominatorTree *DT) {
4317   switch (Pred) {
4318   default:
4319     return None;
4320 
4321   case CmpInst::ICMP_SLT:
4322   case CmpInst::ICMP_SLE:
4323     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
4324                         DT) &&
4325         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4326       return true;
4327     return None;
4328 
4329   case CmpInst::ICMP_ULT:
4330   case CmpInst::ICMP_ULE:
4331     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
4332                         DT) &&
4333         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT))
4334       return true;
4335     return None;
4336   }
4337 }
4338 
4339 /// Return true if the operands of the two compares match.  IsSwappedOps is true
4340 /// when the operands match, but are swapped.
4341 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4342                           const Value *BLHS, const Value *BRHS,
4343                           bool &IsSwappedOps) {
4344 
4345   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4346   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4347   return IsMatchingOps || IsSwappedOps;
4348 }
4349 
4350 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4351 /// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4352 /// BRHS" is false.  Otherwise, return None if we can't infer anything.
4353 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4354                                                     const Value *ALHS,
4355                                                     const Value *ARHS,
4356                                                     CmpInst::Predicate BPred,
4357                                                     const Value *BLHS,
4358                                                     const Value *BRHS,
4359                                                     bool IsSwappedOps) {
4360   // Canonicalize the operands so they're matching.
4361   if (IsSwappedOps) {
4362     std::swap(BLHS, BRHS);
4363     BPred = ICmpInst::getSwappedPredicate(BPred);
4364   }
4365   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4366     return true;
4367   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4368     return false;
4369 
4370   return None;
4371 }
4372 
4373 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4374 /// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4375 /// C2" is false.  Otherwise, return None if we can't infer anything.
4376 static Optional<bool>
4377 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4378                                  const ConstantInt *C1,
4379                                  CmpInst::Predicate BPred,
4380                                  const Value *BLHS, const ConstantInt *C2) {
4381   assert(ALHS == BLHS && "LHS operands must match.");
4382   ConstantRange DomCR =
4383       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4384   ConstantRange CR =
4385       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4386   ConstantRange Intersection = DomCR.intersectWith(CR);
4387   ConstantRange Difference = DomCR.difference(CR);
4388   if (Intersection.isEmptySet())
4389     return false;
4390   if (Difference.isEmptySet())
4391     return true;
4392   return None;
4393 }
4394 
4395 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4396                                         const DataLayout &DL, bool InvertAPred,
4397                                         unsigned Depth, AssumptionCache *AC,
4398                                         const Instruction *CxtI,
4399                                         const DominatorTree *DT) {
4400   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example.
4401   if (LHS->getType() != RHS->getType())
4402     return None;
4403 
4404   Type *OpTy = LHS->getType();
4405   assert(OpTy->getScalarType()->isIntegerTy(1));
4406 
4407   // LHS ==> RHS by definition
4408   if (!InvertAPred && LHS == RHS)
4409     return true;
4410 
4411   if (OpTy->isVectorTy())
4412     // TODO: extending the code below to handle vectors
4413     return None;
4414   assert(OpTy->isIntegerTy(1) && "implied by above");
4415 
4416   ICmpInst::Predicate APred, BPred;
4417   Value *ALHS, *ARHS;
4418   Value *BLHS, *BRHS;
4419 
4420   if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
4421       !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
4422     return None;
4423 
4424   if (InvertAPred)
4425     APred = CmpInst::getInversePredicate(APred);
4426 
4427   // Can we infer anything when the two compares have matching operands?
4428   bool IsSwappedOps;
4429   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4430     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4431             APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4432       return Implication;
4433     // No amount of additional analysis will infer the second condition, so
4434     // early exit.
4435     return None;
4436   }
4437 
4438   // Can we infer anything when the LHS operands match and the RHS operands are
4439   // constants (not necessarily matching)?
4440   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4441     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4442             APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4443             cast<ConstantInt>(BRHS)))
4444       return Implication;
4445     // No amount of additional analysis will infer the second condition, so
4446     // early exit.
4447     return None;
4448   }
4449 
4450   if (APred == BPred)
4451     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
4452                                  CxtI, DT);
4453 
4454   return None;
4455 }
4456