1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/Loads.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/OptimizationDiagnosticInfo.h"
33 #include "llvm/Analysis/TargetLibraryInfo.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/DiagnosticInfo.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GetElementPtrTypeIterator.h"
47 #include "llvm/IR/GlobalAlias.h"
48 #include "llvm/IR/GlobalValue.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/LLVMContext.h"
56 #include "llvm/IR/Metadata.h"
57 #include "llvm/IR/Module.h"
58 #include "llvm/IR/Operator.h"
59 #include "llvm/IR/PatternMatch.h"
60 #include "llvm/IR/Type.h"
61 #include "llvm/IR/User.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/Compiler.h"
66 #include "llvm/Support/ErrorHandling.h"
67 #include "llvm/Support/KnownBits.h"
68 #include "llvm/Support/MathExtras.h"
69 #include <algorithm>
70 #include <array>
71 #include <cassert>
72 #include <cstdint>
73 #include <iterator>
74 #include <utility>
75 
76 using namespace llvm;
77 using namespace llvm::PatternMatch;
78 
79 const unsigned MaxDepth = 6;
80 
81 // Controls the number of uses of the value searched for possible
82 // dominating comparisons.
83 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
84                                               cl::Hidden, cl::init(20));
85 
86 // This optimization is known to cause performance regressions is some cases,
87 // keep it under a temporary flag for now.
88 static cl::opt<bool>
89 DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
90                               cl::Hidden, cl::init(true));
91 
92 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
93 /// returns the element type's bitwidth.
94 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
95   if (unsigned BitWidth = Ty->getScalarSizeInBits())
96     return BitWidth;
97 
98   return DL.getPointerTypeSizeInBits(Ty);
99 }
100 
101 namespace {
102 
103 // Simplifying using an assume can only be done in a particular control-flow
104 // context (the context instruction provides that context). If an assume and
105 // the context instruction are not in the same block then the DT helps in
106 // figuring out if we can use it.
107 struct Query {
108   const DataLayout &DL;
109   AssumptionCache *AC;
110   const Instruction *CxtI;
111   const DominatorTree *DT;
112 
113   // Unlike the other analyses, this may be a nullptr because not all clients
114   // provide it currently.
115   OptimizationRemarkEmitter *ORE;
116 
117   /// Set of assumptions that should be excluded from further queries.
118   /// This is because of the potential for mutual recursion to cause
119   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
120   /// classic case of this is assume(x = y), which will attempt to determine
121   /// bits in x from bits in y, which will attempt to determine bits in y from
122   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
123   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
124   /// (all of which can call computeKnownBits), and so on.
125   std::array<const Value *, MaxDepth> Excluded;
126 
127   unsigned NumExcluded = 0;
128 
129   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
130         const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
131       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}
132 
133   Query(const Query &Q, const Value *NewExcl)
134       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
135         NumExcluded(Q.NumExcluded) {
136     Excluded = Q.Excluded;
137     Excluded[NumExcluded++] = NewExcl;
138     assert(NumExcluded <= Excluded.size());
139   }
140 
141   bool isExcluded(const Value *Value) const {
142     if (NumExcluded == 0)
143       return false;
144     auto End = Excluded.begin() + NumExcluded;
145     return std::find(Excluded.begin(), End, Value) != End;
146   }
147 };
148 
149 } // end anonymous namespace
150 
151 // Given the provided Value and, potentially, a context instruction, return
152 // the preferred context instruction (if any).
153 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
154   // If we've been provided with a context instruction, then use that (provided
155   // it has been inserted).
156   if (CxtI && CxtI->getParent())
157     return CxtI;
158 
159   // If the value is really an already-inserted instruction, then use that.
160   CxtI = dyn_cast<Instruction>(V);
161   if (CxtI && CxtI->getParent())
162     return CxtI;
163 
164   return nullptr;
165 }
166 
167 static void computeKnownBits(const Value *V, KnownBits &Known,
168                              unsigned Depth, const Query &Q);
169 
170 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
171                             const DataLayout &DL, unsigned Depth,
172                             AssumptionCache *AC, const Instruction *CxtI,
173                             const DominatorTree *DT,
174                             OptimizationRemarkEmitter *ORE) {
175   ::computeKnownBits(V, Known, Depth,
176                      Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
177 }
178 
179 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
180                                   const Query &Q);
181 
182 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
183                                  unsigned Depth, AssumptionCache *AC,
184                                  const Instruction *CxtI,
185                                  const DominatorTree *DT,
186                                  OptimizationRemarkEmitter *ORE) {
187   return ::computeKnownBits(V, Depth,
188                             Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
189 }
190 
191 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
192                                const DataLayout &DL,
193                                AssumptionCache *AC, const Instruction *CxtI,
194                                const DominatorTree *DT) {
195   assert(LHS->getType() == RHS->getType() &&
196          "LHS and RHS should have the same type");
197   assert(LHS->getType()->isIntOrIntVectorTy() &&
198          "LHS and RHS should be integers");
199   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
200   KnownBits LHSKnown(IT->getBitWidth());
201   KnownBits RHSKnown(IT->getBitWidth());
202   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
203   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
204   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
205 }
206 
207 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
208   for (const User *U : CxtI->users()) {
209     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
210       if (IC->isEquality())
211         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
212           if (C->isNullValue())
213             continue;
214     return false;
215   }
216   return true;
217 }
218 
219 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
220                                    const Query &Q);
221 
222 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
223                                   bool OrZero,
224                                   unsigned Depth, AssumptionCache *AC,
225                                   const Instruction *CxtI,
226                                   const DominatorTree *DT) {
227   return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
228                                   Query(DL, AC, safeCxtI(V, CxtI), DT));
229 }
230 
231 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
232 
233 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
234                           AssumptionCache *AC, const Instruction *CxtI,
235                           const DominatorTree *DT) {
236   return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
237 }
238 
239 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
240                               unsigned Depth,
241                               AssumptionCache *AC, const Instruction *CxtI,
242                               const DominatorTree *DT) {
243   KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
244   return Known.isNonNegative();
245 }
246 
247 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
248                            AssumptionCache *AC, const Instruction *CxtI,
249                            const DominatorTree *DT) {
250   if (auto *CI = dyn_cast<ConstantInt>(V))
251     return CI->getValue().isStrictlyPositive();
252 
253   // TODO: We'd doing two recursive queries here.  We should factor this such
254   // that only a single query is needed.
255   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
256     isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
257 }
258 
259 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
260                            AssumptionCache *AC, const Instruction *CxtI,
261                            const DominatorTree *DT) {
262   KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
263   return Known.isNegative();
264 }
265 
266 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
267 
268 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
269                            const DataLayout &DL,
270                            AssumptionCache *AC, const Instruction *CxtI,
271                            const DominatorTree *DT) {
272   return ::isKnownNonEqual(V1, V2, Query(DL, AC,
273                                          safeCxtI(V1, safeCxtI(V2, CxtI)),
274                                          DT));
275 }
276 
277 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
278                               const Query &Q);
279 
280 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
281                              const DataLayout &DL,
282                              unsigned Depth, AssumptionCache *AC,
283                              const Instruction *CxtI, const DominatorTree *DT) {
284   return ::MaskedValueIsZero(V, Mask, Depth,
285                              Query(DL, AC, safeCxtI(V, CxtI), DT));
286 }
287 
288 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
289                                    const Query &Q);
290 
291 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
292                                   unsigned Depth, AssumptionCache *AC,
293                                   const Instruction *CxtI,
294                                   const DominatorTree *DT) {
295   return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
296 }
297 
298 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
299                                    bool NSW,
300                                    KnownBits &KnownOut, KnownBits &Known2,
301                                    unsigned Depth, const Query &Q) {
302   unsigned BitWidth = KnownOut.getBitWidth();
303 
304   // If an initial sequence of bits in the result is not needed, the
305   // corresponding bits in the operands are not needed.
306   KnownBits LHSKnown(BitWidth);
307   computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
308   computeKnownBits(Op1, Known2, Depth + 1, Q);
309 
310   KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
311 }
312 
313 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
314                                 KnownBits &Known, KnownBits &Known2,
315                                 unsigned Depth, const Query &Q) {
316   unsigned BitWidth = Known.getBitWidth();
317   computeKnownBits(Op1, Known, Depth + 1, Q);
318   computeKnownBits(Op0, Known2, Depth + 1, Q);
319 
320   bool isKnownNegative = false;
321   bool isKnownNonNegative = false;
322   // If the multiplication is known not to overflow, compute the sign bit.
323   if (NSW) {
324     if (Op0 == Op1) {
325       // The product of a number with itself is non-negative.
326       isKnownNonNegative = true;
327     } else {
328       bool isKnownNonNegativeOp1 = Known.isNonNegative();
329       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
330       bool isKnownNegativeOp1 = Known.isNegative();
331       bool isKnownNegativeOp0 = Known2.isNegative();
332       // The product of two numbers with the same sign is non-negative.
333       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
334         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
335       // The product of a negative number and a non-negative number is either
336       // negative or zero.
337       if (!isKnownNonNegative)
338         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
339                            isKnownNonZero(Op0, Depth, Q)) ||
340                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
341                            isKnownNonZero(Op1, Depth, Q));
342     }
343   }
344 
345   // If low bits are zero in either operand, output low known-0 bits.
346   // Also compute a conservative estimate for high known-0 bits.
347   // More trickiness is possible, but this is sufficient for the
348   // interesting case of alignment computation.
349   unsigned TrailZ = Known.countMinTrailingZeros() +
350                     Known2.countMinTrailingZeros();
351   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
352                              Known2.countMinLeadingZeros(),
353                              BitWidth) - BitWidth;
354 
355   TrailZ = std::min(TrailZ, BitWidth);
356   LeadZ = std::min(LeadZ, BitWidth);
357   Known.resetAll();
358   Known.Zero.setLowBits(TrailZ);
359   Known.Zero.setHighBits(LeadZ);
360 
361   // Only make use of no-wrap flags if we failed to compute the sign bit
362   // directly.  This matters if the multiplication always overflows, in
363   // which case we prefer to follow the result of the direct computation,
364   // though as the program is invoking undefined behaviour we can choose
365   // whatever we like here.
366   if (isKnownNonNegative && !Known.isNegative())
367     Known.makeNonNegative();
368   else if (isKnownNegative && !Known.isNonNegative())
369     Known.makeNegative();
370 }
371 
372 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
373                                              KnownBits &Known) {
374   unsigned BitWidth = Known.getBitWidth();
375   unsigned NumRanges = Ranges.getNumOperands() / 2;
376   assert(NumRanges >= 1);
377 
378   Known.Zero.setAllBits();
379   Known.One.setAllBits();
380 
381   for (unsigned i = 0; i < NumRanges; ++i) {
382     ConstantInt *Lower =
383         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
384     ConstantInt *Upper =
385         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
386     ConstantRange Range(Lower->getValue(), Upper->getValue());
387 
388     // The first CommonPrefixBits of all values in Range are equal.
389     unsigned CommonPrefixBits =
390         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
391 
392     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
393     Known.One &= Range.getUnsignedMax() & Mask;
394     Known.Zero &= ~Range.getUnsignedMax() & Mask;
395   }
396 }
397 
398 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
399   SmallVector<const Value *, 16> WorkSet(1, I);
400   SmallPtrSet<const Value *, 32> Visited;
401   SmallPtrSet<const Value *, 16> EphValues;
402 
403   // The instruction defining an assumption's condition itself is always
404   // considered ephemeral to that assumption (even if it has other
405   // non-ephemeral users). See r246696's test case for an example.
406   if (is_contained(I->operands(), E))
407     return true;
408 
409   while (!WorkSet.empty()) {
410     const Value *V = WorkSet.pop_back_val();
411     if (!Visited.insert(V).second)
412       continue;
413 
414     // If all uses of this value are ephemeral, then so is this value.
415     if (llvm::all_of(V->users(), [&](const User *U) {
416                                    return EphValues.count(U);
417                                  })) {
418       if (V == E)
419         return true;
420 
421       if (V == I || isSafeToSpeculativelyExecute(V)) {
422        EphValues.insert(V);
423        if (const User *U = dyn_cast<User>(V))
424          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
425               J != JE; ++J)
426            WorkSet.push_back(*J);
427       }
428     }
429   }
430 
431   return false;
432 }
433 
434 // Is this an intrinsic that cannot be speculated but also cannot trap?
435 static bool isAssumeLikeIntrinsic(const Instruction *I) {
436   if (const CallInst *CI = dyn_cast<CallInst>(I))
437     if (Function *F = CI->getCalledFunction())
438       switch (F->getIntrinsicID()) {
439       default: break;
440       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
441       case Intrinsic::assume:
442       case Intrinsic::dbg_declare:
443       case Intrinsic::dbg_value:
444       case Intrinsic::invariant_start:
445       case Intrinsic::invariant_end:
446       case Intrinsic::lifetime_start:
447       case Intrinsic::lifetime_end:
448       case Intrinsic::objectsize:
449       case Intrinsic::ptr_annotation:
450       case Intrinsic::var_annotation:
451         return true;
452       }
453 
454   return false;
455 }
456 
457 bool llvm::isValidAssumeForContext(const Instruction *Inv,
458                                    const Instruction *CxtI,
459                                    const DominatorTree *DT) {
460   // There are two restrictions on the use of an assume:
461   //  1. The assume must dominate the context (or the control flow must
462   //     reach the assume whenever it reaches the context).
463   //  2. The context must not be in the assume's set of ephemeral values
464   //     (otherwise we will use the assume to prove that the condition
465   //     feeding the assume is trivially true, thus causing the removal of
466   //     the assume).
467 
468   if (DT) {
469     if (DT->dominates(Inv, CxtI))
470       return true;
471   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
472     // We don't have a DT, but this trivially dominates.
473     return true;
474   }
475 
476   // With or without a DT, the only remaining case we will check is if the
477   // instructions are in the same BB.  Give up if that is not the case.
478   if (Inv->getParent() != CxtI->getParent())
479     return false;
480 
481   // If we have a dom tree, then we now know that the assume doens't dominate
482   // the other instruction.  If we don't have a dom tree then we can check if
483   // the assume is first in the BB.
484   if (!DT) {
485     // Search forward from the assume until we reach the context (or the end
486     // of the block); the common case is that the assume will come first.
487     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
488          IE = Inv->getParent()->end(); I != IE; ++I)
489       if (&*I == CxtI)
490         return true;
491   }
492 
493   // The context comes first, but they're both in the same block. Make sure
494   // there is nothing in between that might interrupt the control flow.
495   for (BasicBlock::const_iterator I =
496          std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
497        I != IE; ++I)
498     if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
499       return false;
500 
501   return !isEphemeralValueOf(Inv, CxtI);
502 }
503 
504 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
505                                        unsigned Depth, const Query &Q) {
506   // Use of assumptions is context-sensitive. If we don't have a context, we
507   // cannot use them!
508   if (!Q.AC || !Q.CxtI)
509     return;
510 
511   unsigned BitWidth = Known.getBitWidth();
512 
513   // Note that the patterns below need to be kept in sync with the code
514   // in AssumptionCache::updateAffectedValues.
515 
516   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
517     if (!AssumeVH)
518       continue;
519     CallInst *I = cast<CallInst>(AssumeVH);
520     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
521            "Got assumption for the wrong function!");
522     if (Q.isExcluded(I))
523       continue;
524 
525     // Warning: This loop can end up being somewhat performance sensetive.
526     // We're running this loop for once for each value queried resulting in a
527     // runtime of ~O(#assumes * #values).
528 
529     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
530            "must be an assume intrinsic");
531 
532     Value *Arg = I->getArgOperand(0);
533 
534     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
535       assert(BitWidth == 1 && "assume operand is not i1?");
536       Known.setAllOnes();
537       return;
538     }
539     if (match(Arg, m_Not(m_Specific(V))) &&
540         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
541       assert(BitWidth == 1 && "assume operand is not i1?");
542       Known.setAllZero();
543       return;
544     }
545 
546     // The remaining tests are all recursive, so bail out if we hit the limit.
547     if (Depth == MaxDepth)
548       continue;
549 
550     Value *A, *B;
551     auto m_V = m_CombineOr(m_Specific(V),
552                            m_CombineOr(m_PtrToInt(m_Specific(V)),
553                            m_BitCast(m_Specific(V))));
554 
555     CmpInst::Predicate Pred;
556     ConstantInt *C;
557     // assume(v = a)
558     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
559         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
560       KnownBits RHSKnown(BitWidth);
561       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
562       Known.Zero |= RHSKnown.Zero;
563       Known.One  |= RHSKnown.One;
564     // assume(v & b = a)
565     } else if (match(Arg,
566                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
567                Pred == ICmpInst::ICMP_EQ &&
568                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
569       KnownBits RHSKnown(BitWidth);
570       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
571       KnownBits MaskKnown(BitWidth);
572       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
573 
574       // For those bits in the mask that are known to be one, we can propagate
575       // known bits from the RHS to V.
576       Known.Zero |= RHSKnown.Zero & MaskKnown.One;
577       Known.One  |= RHSKnown.One  & MaskKnown.One;
578     // assume(~(v & b) = a)
579     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
580                                    m_Value(A))) &&
581                Pred == ICmpInst::ICMP_EQ &&
582                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
583       KnownBits RHSKnown(BitWidth);
584       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
585       KnownBits MaskKnown(BitWidth);
586       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
587 
588       // For those bits in the mask that are known to be one, we can propagate
589       // inverted known bits from the RHS to V.
590       Known.Zero |= RHSKnown.One  & MaskKnown.One;
591       Known.One  |= RHSKnown.Zero & MaskKnown.One;
592     // assume(v | b = a)
593     } else if (match(Arg,
594                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
595                Pred == ICmpInst::ICMP_EQ &&
596                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
597       KnownBits RHSKnown(BitWidth);
598       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
599       KnownBits BKnown(BitWidth);
600       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
601 
602       // For those bits in B that are known to be zero, we can propagate known
603       // bits from the RHS to V.
604       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
605       Known.One  |= RHSKnown.One  & BKnown.Zero;
606     // assume(~(v | b) = a)
607     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
608                                    m_Value(A))) &&
609                Pred == ICmpInst::ICMP_EQ &&
610                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
611       KnownBits RHSKnown(BitWidth);
612       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
613       KnownBits BKnown(BitWidth);
614       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
615 
616       // For those bits in B that are known to be zero, we can propagate
617       // inverted known bits from the RHS to V.
618       Known.Zero |= RHSKnown.One  & BKnown.Zero;
619       Known.One  |= RHSKnown.Zero & BKnown.Zero;
620     // assume(v ^ b = a)
621     } else if (match(Arg,
622                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
623                Pred == ICmpInst::ICMP_EQ &&
624                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
625       KnownBits RHSKnown(BitWidth);
626       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
627       KnownBits BKnown(BitWidth);
628       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
629 
630       // For those bits in B that are known to be zero, we can propagate known
631       // bits from the RHS to V. For those bits in B that are known to be one,
632       // we can propagate inverted known bits from the RHS to V.
633       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
634       Known.One  |= RHSKnown.One  & BKnown.Zero;
635       Known.Zero |= RHSKnown.One  & BKnown.One;
636       Known.One  |= RHSKnown.Zero & BKnown.One;
637     // assume(~(v ^ b) = a)
638     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
639                                    m_Value(A))) &&
640                Pred == ICmpInst::ICMP_EQ &&
641                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
642       KnownBits RHSKnown(BitWidth);
643       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
644       KnownBits BKnown(BitWidth);
645       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
646 
647       // For those bits in B that are known to be zero, we can propagate
648       // inverted known bits from the RHS to V. For those bits in B that are
649       // known to be one, we can propagate known bits from the RHS to V.
650       Known.Zero |= RHSKnown.One  & BKnown.Zero;
651       Known.One  |= RHSKnown.Zero & BKnown.Zero;
652       Known.Zero |= RHSKnown.Zero & BKnown.One;
653       Known.One  |= RHSKnown.One  & BKnown.One;
654     // assume(v << c = a)
655     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
656                                    m_Value(A))) &&
657                Pred == ICmpInst::ICMP_EQ &&
658                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
659       KnownBits RHSKnown(BitWidth);
660       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
661       // For those bits in RHS that are known, we can propagate them to known
662       // bits in V shifted to the right by C.
663       RHSKnown.Zero.lshrInPlace(C->getZExtValue());
664       Known.Zero |= RHSKnown.Zero;
665       RHSKnown.One.lshrInPlace(C->getZExtValue());
666       Known.One  |= RHSKnown.One;
667     // assume(~(v << c) = a)
668     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
669                                    m_Value(A))) &&
670                Pred == ICmpInst::ICMP_EQ &&
671                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
672       KnownBits RHSKnown(BitWidth);
673       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
674       // For those bits in RHS that are known, we can propagate them inverted
675       // to known bits in V shifted to the right by C.
676       RHSKnown.One.lshrInPlace(C->getZExtValue());
677       Known.Zero |= RHSKnown.One;
678       RHSKnown.Zero.lshrInPlace(C->getZExtValue());
679       Known.One  |= RHSKnown.Zero;
680     // assume(v >> c = a)
681     } else if (match(Arg,
682                      m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
683                               m_Value(A))) &&
684                Pred == ICmpInst::ICMP_EQ &&
685                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
686       KnownBits RHSKnown(BitWidth);
687       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
688       // For those bits in RHS that are known, we can propagate them to known
689       // bits in V shifted to the right by C.
690       Known.Zero |= RHSKnown.Zero << C->getZExtValue();
691       Known.One  |= RHSKnown.One  << C->getZExtValue();
692     // assume(~(v >> c) = a)
693     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
694                                    m_Value(A))) &&
695                Pred == ICmpInst::ICMP_EQ &&
696                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
697       KnownBits RHSKnown(BitWidth);
698       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
699       // For those bits in RHS that are known, we can propagate them inverted
700       // to known bits in V shifted to the right by C.
701       Known.Zero |= RHSKnown.One  << C->getZExtValue();
702       Known.One  |= RHSKnown.Zero << C->getZExtValue();
703     // assume(v >=_s c) where c is non-negative
704     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
705                Pred == ICmpInst::ICMP_SGE &&
706                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
707       KnownBits RHSKnown(BitWidth);
708       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
709 
710       if (RHSKnown.isNonNegative()) {
711         // We know that the sign bit is zero.
712         Known.makeNonNegative();
713       }
714     // assume(v >_s c) where c is at least -1.
715     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
716                Pred == ICmpInst::ICMP_SGT &&
717                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
718       KnownBits RHSKnown(BitWidth);
719       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
720 
721       if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
722         // We know that the sign bit is zero.
723         Known.makeNonNegative();
724       }
725     // assume(v <=_s c) where c is negative
726     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
727                Pred == ICmpInst::ICMP_SLE &&
728                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
729       KnownBits RHSKnown(BitWidth);
730       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
731 
732       if (RHSKnown.isNegative()) {
733         // We know that the sign bit is one.
734         Known.makeNegative();
735       }
736     // assume(v <_s c) where c is non-positive
737     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
738                Pred == ICmpInst::ICMP_SLT &&
739                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
740       KnownBits RHSKnown(BitWidth);
741       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
742 
743       if (RHSKnown.isZero() || RHSKnown.isNegative()) {
744         // We know that the sign bit is one.
745         Known.makeNegative();
746       }
747     // assume(v <=_u c)
748     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
749                Pred == ICmpInst::ICMP_ULE &&
750                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
751       KnownBits RHSKnown(BitWidth);
752       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
753 
754       // Whatever high bits in c are zero are known to be zero.
755       Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
756       // assume(v <_u c)
757     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
758                Pred == ICmpInst::ICMP_ULT &&
759                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
760       KnownBits RHSKnown(BitWidth);
761       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
762 
763       // Whatever high bits in c are zero are known to be zero (if c is a power
764       // of 2, then one more).
765       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
766         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
767       else
768         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
769     }
770   }
771 
772   // If assumptions conflict with each other or previous known bits, then we
773   // have a logical fallacy. It's possible that the assumption is not reachable,
774   // so this isn't a real bug. On the other hand, the program may have undefined
775   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
776   // clear out the known bits, try to warn the user, and hope for the best.
777   if (Known.Zero.intersects(Known.One)) {
778     Known.resetAll();
779 
780     if (Q.ORE) {
781       auto *CxtI = const_cast<Instruction *>(Q.CxtI);
782       OptimizationRemarkAnalysis ORA("value-tracking", "BadAssumption", CxtI);
783       Q.ORE->emit(ORA << "Detected conflicting code assumptions. Program may "
784                          "have undefined behavior, or compiler may have "
785                          "internal error.");
786     }
787   }
788 }
789 
790 // Compute known bits from a shift operator, including those with a
791 // non-constant shift amount. Known is the outputs of this function. Known2 is a
792 // pre-allocated temporary with the/ same bit width as Known. KZF and KOF are
793 // operator-specific functors that, given the known-zero or known-one bits
794 // respectively, and a shift amount, compute the implied known-zero or known-one
795 // bits of the shift operator's result respectively for that shift amount. The
796 // results from calling KZF and KOF are conservatively combined for all
797 // permitted shift amounts.
798 static void computeKnownBitsFromShiftOperator(
799     const Operator *I, KnownBits &Known, KnownBits &Known2,
800     unsigned Depth, const Query &Q,
801     function_ref<APInt(const APInt &, unsigned)> KZF,
802     function_ref<APInt(const APInt &, unsigned)> KOF) {
803   unsigned BitWidth = Known.getBitWidth();
804 
805   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
806     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
807 
808     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
809     Known.Zero = KZF(Known.Zero, ShiftAmt);
810     Known.One  = KOF(Known.One, ShiftAmt);
811     // If there is conflict between Known.Zero and Known.One, this must be an
812     // overflowing left shift, so the shift result is undefined. Clear Known
813     // bits so that other code could propagate this undef.
814     if ((Known.Zero & Known.One) != 0)
815       Known.resetAll();
816 
817     return;
818   }
819 
820   computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
821 
822   // If the shift amount could be greater than or equal to the bit-width of the LHS, the
823   // value could be undef, so we don't know anything about it.
824   if ((~Known.Zero).uge(BitWidth)) {
825     Known.resetAll();
826     return;
827   }
828 
829   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
830   // BitWidth > 64 and any upper bits are known, we'll end up returning the
831   // limit value (which implies all bits are known).
832   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
833   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
834 
835   // It would be more-clearly correct to use the two temporaries for this
836   // calculation. Reusing the APInts here to prevent unnecessary allocations.
837   Known.resetAll();
838 
839   // If we know the shifter operand is nonzero, we can sometimes infer more
840   // known bits. However this is expensive to compute, so be lazy about it and
841   // only compute it when absolutely necessary.
842   Optional<bool> ShifterOperandIsNonZero;
843 
844   // Early exit if we can't constrain any well-defined shift amount.
845   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
846       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
847     ShifterOperandIsNonZero =
848         isKnownNonZero(I->getOperand(1), Depth + 1, Q);
849     if (!*ShifterOperandIsNonZero)
850       return;
851   }
852 
853   computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
854 
855   Known.Zero.setAllBits();
856   Known.One.setAllBits();
857   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
858     // Combine the shifted known input bits only for those shift amounts
859     // compatible with its known constraints.
860     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
861       continue;
862     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
863       continue;
864     // If we know the shifter is nonzero, we may be able to infer more known
865     // bits. This check is sunk down as far as possible to avoid the expensive
866     // call to isKnownNonZero if the cheaper checks above fail.
867     if (ShiftAmt == 0) {
868       if (!ShifterOperandIsNonZero.hasValue())
869         ShifterOperandIsNonZero =
870             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
871       if (*ShifterOperandIsNonZero)
872         continue;
873     }
874 
875     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
876     Known.One  &= KOF(Known2.One, ShiftAmt);
877   }
878 
879   // If there are no compatible shift amounts, then we've proven that the shift
880   // amount must be >= the BitWidth, and the result is undefined. We could
881   // return anything we'd like, but we need to make sure the sets of known bits
882   // stay disjoint (it should be better for some other code to actually
883   // propagate the undef than to pick a value here using known bits).
884   if (Known.Zero.intersects(Known.One))
885     Known.resetAll();
886 }
887 
888 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
889                                          unsigned Depth, const Query &Q) {
890   unsigned BitWidth = Known.getBitWidth();
891 
892   KnownBits Known2(Known);
893   switch (I->getOpcode()) {
894   default: break;
895   case Instruction::Load:
896     if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
897       computeKnownBitsFromRangeMetadata(*MD, Known);
898     break;
899   case Instruction::And: {
900     // If either the LHS or the RHS are Zero, the result is zero.
901     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
902     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
903 
904     // Output known-1 bits are only known if set in both the LHS & RHS.
905     Known.One &= Known2.One;
906     // Output known-0 are known to be clear if zero in either the LHS | RHS.
907     Known.Zero |= Known2.Zero;
908 
909     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
910     // here we handle the more general case of adding any odd number by
911     // matching the form add(x, add(x, y)) where y is odd.
912     // TODO: This could be generalized to clearing any bit set in y where the
913     // following bit is known to be unset in y.
914     Value *Y = nullptr;
915     if (!Known.Zero[0] && !Known.One[0] &&
916         (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
917                                        m_Value(Y))) ||
918          match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
919                                        m_Value(Y))))) {
920       Known2.resetAll();
921       computeKnownBits(Y, Known2, Depth + 1, Q);
922       if (Known2.countMinTrailingOnes() > 0)
923         Known.Zero.setBit(0);
924     }
925     break;
926   }
927   case Instruction::Or:
928     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
929     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
930 
931     // Output known-0 bits are only known if clear in both the LHS & RHS.
932     Known.Zero &= Known2.Zero;
933     // Output known-1 are known to be set if set in either the LHS | RHS.
934     Known.One |= Known2.One;
935     break;
936   case Instruction::Xor: {
937     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
938     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
939 
940     // Output known-0 bits are known if clear or set in both the LHS & RHS.
941     APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
942     // Output known-1 are known to be set if set in only one of the LHS, RHS.
943     Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
944     Known.Zero = std::move(KnownZeroOut);
945     break;
946   }
947   case Instruction::Mul: {
948     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
949     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
950                         Known2, Depth, Q);
951     break;
952   }
953   case Instruction::UDiv: {
954     // For the purposes of computing leading zeros we can conservatively
955     // treat a udiv as a logical right shift by the power of 2 known to
956     // be less than the denominator.
957     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
958     unsigned LeadZ = Known2.countMinLeadingZeros();
959 
960     Known2.resetAll();
961     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
962     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
963     if (RHSMaxLeadingZeros != BitWidth)
964       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
965 
966     Known.Zero.setHighBits(LeadZ);
967     break;
968   }
969   case Instruction::Select: {
970     const Value *LHS, *RHS;
971     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
972     if (SelectPatternResult::isMinOrMax(SPF)) {
973       computeKnownBits(RHS, Known, Depth + 1, Q);
974       computeKnownBits(LHS, Known2, Depth + 1, Q);
975     } else {
976       computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
977       computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
978     }
979 
980     unsigned MaxHighOnes = 0;
981     unsigned MaxHighZeros = 0;
982     if (SPF == SPF_SMAX) {
983       // If both sides are negative, the result is negative.
984       if (Known.isNegative() && Known2.isNegative())
985         // We can derive a lower bound on the result by taking the max of the
986         // leading one bits.
987         MaxHighOnes =
988             std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
989       // If either side is non-negative, the result is non-negative.
990       else if (Known.isNonNegative() || Known2.isNonNegative())
991         MaxHighZeros = 1;
992     } else if (SPF == SPF_SMIN) {
993       // If both sides are non-negative, the result is non-negative.
994       if (Known.isNonNegative() && Known2.isNonNegative())
995         // We can derive an upper bound on the result by taking the max of the
996         // leading zero bits.
997         MaxHighZeros = std::max(Known.countMinLeadingZeros(),
998                                 Known2.countMinLeadingZeros());
999       // If either side is negative, the result is negative.
1000       else if (Known.isNegative() || Known2.isNegative())
1001         MaxHighOnes = 1;
1002     } else if (SPF == SPF_UMAX) {
1003       // We can derive a lower bound on the result by taking the max of the
1004       // leading one bits.
1005       MaxHighOnes =
1006           std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1007     } else if (SPF == SPF_UMIN) {
1008       // We can derive an upper bound on the result by taking the max of the
1009       // leading zero bits.
1010       MaxHighZeros =
1011           std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1012     }
1013 
1014     // Only known if known in both the LHS and RHS.
1015     Known.One &= Known2.One;
1016     Known.Zero &= Known2.Zero;
1017     if (MaxHighOnes > 0)
1018       Known.One.setHighBits(MaxHighOnes);
1019     if (MaxHighZeros > 0)
1020       Known.Zero.setHighBits(MaxHighZeros);
1021     break;
1022   }
1023   case Instruction::FPTrunc:
1024   case Instruction::FPExt:
1025   case Instruction::FPToUI:
1026   case Instruction::FPToSI:
1027   case Instruction::SIToFP:
1028   case Instruction::UIToFP:
1029     break; // Can't work with floating point.
1030   case Instruction::PtrToInt:
1031   case Instruction::IntToPtr:
1032     // Fall through and handle them the same as zext/trunc.
1033     LLVM_FALLTHROUGH;
1034   case Instruction::ZExt:
1035   case Instruction::Trunc: {
1036     Type *SrcTy = I->getOperand(0)->getType();
1037 
1038     unsigned SrcBitWidth;
1039     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1040     // which fall through here.
1041     SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
1042 
1043     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1044     Known = Known.zextOrTrunc(SrcBitWidth);
1045     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1046     Known = Known.zextOrTrunc(BitWidth);
1047     // Any top bits are known to be zero.
1048     if (BitWidth > SrcBitWidth)
1049       Known.Zero.setBitsFrom(SrcBitWidth);
1050     break;
1051   }
1052   case Instruction::BitCast: {
1053     Type *SrcTy = I->getOperand(0)->getType();
1054     if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1055         // TODO: For now, not handling conversions like:
1056         // (bitcast i64 %x to <2 x i32>)
1057         !I->getType()->isVectorTy()) {
1058       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1059       break;
1060     }
1061     break;
1062   }
1063   case Instruction::SExt: {
1064     // Compute the bits in the result that are not present in the input.
1065     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1066 
1067     Known = Known.trunc(SrcBitWidth);
1068     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1069     // If the sign bit of the input is known set or clear, then we know the
1070     // top bits of the result.
1071     Known = Known.sext(BitWidth);
1072     break;
1073   }
1074   case Instruction::Shl: {
1075     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1076     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1077     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1078       APInt KZResult = KnownZero << ShiftAmt;
1079       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1080       // If this shift has "nsw" keyword, then the result is either a poison
1081       // value or has the same sign bit as the first operand.
1082       if (NSW && KnownZero.isSignBitSet())
1083         KZResult.setSignBit();
1084       return KZResult;
1085     };
1086 
1087     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1088       APInt KOResult = KnownOne << ShiftAmt;
1089       if (NSW && KnownOne.isSignBitSet())
1090         KOResult.setSignBit();
1091       return KOResult;
1092     };
1093 
1094     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1095     break;
1096   }
1097   case Instruction::LShr: {
1098     // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1099     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1100       APInt KZResult = KnownZero.lshr(ShiftAmt);
1101       // High bits known zero.
1102       KZResult.setHighBits(ShiftAmt);
1103       return KZResult;
1104     };
1105 
1106     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1107       return KnownOne.lshr(ShiftAmt);
1108     };
1109 
1110     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1111     break;
1112   }
1113   case Instruction::AShr: {
1114     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1115     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1116       return KnownZero.ashr(ShiftAmt);
1117     };
1118 
1119     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1120       return KnownOne.ashr(ShiftAmt);
1121     };
1122 
1123     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1124     break;
1125   }
1126   case Instruction::Sub: {
1127     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1128     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1129                            Known, Known2, Depth, Q);
1130     break;
1131   }
1132   case Instruction::Add: {
1133     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1134     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1135                            Known, Known2, Depth, Q);
1136     break;
1137   }
1138   case Instruction::SRem:
1139     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1140       APInt RA = Rem->getValue().abs();
1141       if (RA.isPowerOf2()) {
1142         APInt LowBits = RA - 1;
1143         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1144 
1145         // The low bits of the first operand are unchanged by the srem.
1146         Known.Zero = Known2.Zero & LowBits;
1147         Known.One = Known2.One & LowBits;
1148 
1149         // If the first operand is non-negative or has all low bits zero, then
1150         // the upper bits are all zero.
1151         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1152           Known.Zero |= ~LowBits;
1153 
1154         // If the first operand is negative and not all low bits are zero, then
1155         // the upper bits are all one.
1156         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1157           Known.One |= ~LowBits;
1158 
1159         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1160         break;
1161       }
1162     }
1163 
1164     // The sign bit is the LHS's sign bit, except when the result of the
1165     // remainder is zero.
1166     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1167     // If it's known zero, our sign bit is also zero.
1168     if (Known2.isNonNegative())
1169       Known.makeNonNegative();
1170 
1171     break;
1172   case Instruction::URem: {
1173     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1174       const APInt &RA = Rem->getValue();
1175       if (RA.isPowerOf2()) {
1176         APInt LowBits = (RA - 1);
1177         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1178         Known.Zero |= ~LowBits;
1179         Known.One &= LowBits;
1180         break;
1181       }
1182     }
1183 
1184     // Since the result is less than or equal to either operand, any leading
1185     // zero bits in either operand must also exist in the result.
1186     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1187     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1188 
1189     unsigned Leaders =
1190         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1191     Known.resetAll();
1192     Known.Zero.setHighBits(Leaders);
1193     break;
1194   }
1195 
1196   case Instruction::Alloca: {
1197     const AllocaInst *AI = cast<AllocaInst>(I);
1198     unsigned Align = AI->getAlignment();
1199     if (Align == 0)
1200       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1201 
1202     if (Align > 0)
1203       Known.Zero.setLowBits(countTrailingZeros(Align));
1204     break;
1205   }
1206   case Instruction::GetElementPtr: {
1207     // Analyze all of the subscripts of this getelementptr instruction
1208     // to determine if we can prove known low zero bits.
1209     KnownBits LocalKnown(BitWidth);
1210     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1211     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1212 
1213     gep_type_iterator GTI = gep_type_begin(I);
1214     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1215       Value *Index = I->getOperand(i);
1216       if (StructType *STy = GTI.getStructTypeOrNull()) {
1217         // Handle struct member offset arithmetic.
1218 
1219         // Handle case when index is vector zeroinitializer
1220         Constant *CIndex = cast<Constant>(Index);
1221         if (CIndex->isZeroValue())
1222           continue;
1223 
1224         if (CIndex->getType()->isVectorTy())
1225           Index = CIndex->getSplatValue();
1226 
1227         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1228         const StructLayout *SL = Q.DL.getStructLayout(STy);
1229         uint64_t Offset = SL->getElementOffset(Idx);
1230         TrailZ = std::min<unsigned>(TrailZ,
1231                                     countTrailingZeros(Offset));
1232       } else {
1233         // Handle array index arithmetic.
1234         Type *IndexedTy = GTI.getIndexedType();
1235         if (!IndexedTy->isSized()) {
1236           TrailZ = 0;
1237           break;
1238         }
1239         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1240         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1241         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1242         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1243         TrailZ = std::min(TrailZ,
1244                           unsigned(countTrailingZeros(TypeSize) +
1245                                    LocalKnown.countMinTrailingZeros()));
1246       }
1247     }
1248 
1249     Known.Zero.setLowBits(TrailZ);
1250     break;
1251   }
1252   case Instruction::PHI: {
1253     const PHINode *P = cast<PHINode>(I);
1254     // Handle the case of a simple two-predecessor recurrence PHI.
1255     // There's a lot more that could theoretically be done here, but
1256     // this is sufficient to catch some interesting cases.
1257     if (P->getNumIncomingValues() == 2) {
1258       for (unsigned i = 0; i != 2; ++i) {
1259         Value *L = P->getIncomingValue(i);
1260         Value *R = P->getIncomingValue(!i);
1261         Operator *LU = dyn_cast<Operator>(L);
1262         if (!LU)
1263           continue;
1264         unsigned Opcode = LU->getOpcode();
1265         // Check for operations that have the property that if
1266         // both their operands have low zero bits, the result
1267         // will have low zero bits.
1268         if (Opcode == Instruction::Add ||
1269             Opcode == Instruction::Sub ||
1270             Opcode == Instruction::And ||
1271             Opcode == Instruction::Or ||
1272             Opcode == Instruction::Mul) {
1273           Value *LL = LU->getOperand(0);
1274           Value *LR = LU->getOperand(1);
1275           // Find a recurrence.
1276           if (LL == I)
1277             L = LR;
1278           else if (LR == I)
1279             L = LL;
1280           else
1281             break;
1282           // Ok, we have a PHI of the form L op= R. Check for low
1283           // zero bits.
1284           computeKnownBits(R, Known2, Depth + 1, Q);
1285 
1286           // We need to take the minimum number of known bits
1287           KnownBits Known3(Known);
1288           computeKnownBits(L, Known3, Depth + 1, Q);
1289 
1290           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1291                                          Known3.countMinTrailingZeros()));
1292 
1293           if (DontImproveNonNegativePhiBits)
1294             break;
1295 
1296           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1297           if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
1298             // If initial value of recurrence is nonnegative, and we are adding
1299             // a nonnegative number with nsw, the result can only be nonnegative
1300             // or poison value regardless of the number of times we execute the
1301             // add in phi recurrence. If initial value is negative and we are
1302             // adding a negative number with nsw, the result can only be
1303             // negative or poison value. Similar arguments apply to sub and mul.
1304             //
1305             // (add non-negative, non-negative) --> non-negative
1306             // (add negative, negative) --> negative
1307             if (Opcode == Instruction::Add) {
1308               if (Known2.isNonNegative() && Known3.isNonNegative())
1309                 Known.makeNonNegative();
1310               else if (Known2.isNegative() && Known3.isNegative())
1311                 Known.makeNegative();
1312             }
1313 
1314             // (sub nsw non-negative, negative) --> non-negative
1315             // (sub nsw negative, non-negative) --> negative
1316             else if (Opcode == Instruction::Sub && LL == I) {
1317               if (Known2.isNonNegative() && Known3.isNegative())
1318                 Known.makeNonNegative();
1319               else if (Known2.isNegative() && Known3.isNonNegative())
1320                 Known.makeNegative();
1321             }
1322 
1323             // (mul nsw non-negative, non-negative) --> non-negative
1324             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1325                      Known3.isNonNegative())
1326               Known.makeNonNegative();
1327           }
1328 
1329           break;
1330         }
1331       }
1332     }
1333 
1334     // Unreachable blocks may have zero-operand PHI nodes.
1335     if (P->getNumIncomingValues() == 0)
1336       break;
1337 
1338     // Otherwise take the unions of the known bit sets of the operands,
1339     // taking conservative care to avoid excessive recursion.
1340     if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1341       // Skip if every incoming value references to ourself.
1342       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1343         break;
1344 
1345       Known.Zero.setAllBits();
1346       Known.One.setAllBits();
1347       for (Value *IncValue : P->incoming_values()) {
1348         // Skip direct self references.
1349         if (IncValue == P) continue;
1350 
1351         Known2 = KnownBits(BitWidth);
1352         // Recurse, but cap the recursion to one level, because we don't
1353         // want to waste time spinning around in loops.
1354         computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1355         Known.Zero &= Known2.Zero;
1356         Known.One &= Known2.One;
1357         // If all bits have been ruled out, there's no need to check
1358         // more operands.
1359         if (!Known.Zero && !Known.One)
1360           break;
1361       }
1362     }
1363     break;
1364   }
1365   case Instruction::Call:
1366   case Instruction::Invoke:
1367     // If range metadata is attached to this call, set known bits from that,
1368     // and then intersect with known bits based on other properties of the
1369     // function.
1370     if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1371       computeKnownBitsFromRangeMetadata(*MD, Known);
1372     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1373       computeKnownBits(RV, Known2, Depth + 1, Q);
1374       Known.Zero |= Known2.Zero;
1375       Known.One |= Known2.One;
1376     }
1377     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1378       switch (II->getIntrinsicID()) {
1379       default: break;
1380       case Intrinsic::bitreverse:
1381         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1382         Known.Zero |= Known2.Zero.reverseBits();
1383         Known.One |= Known2.One.reverseBits();
1384         break;
1385       case Intrinsic::bswap:
1386         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1387         Known.Zero |= Known2.Zero.byteSwap();
1388         Known.One |= Known2.One.byteSwap();
1389         break;
1390       case Intrinsic::ctlz: {
1391         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1392         // If we have a known 1, its position is our upper bound.
1393         unsigned PossibleLZ = Known2.One.countLeadingZeros();
1394         // If this call is undefined for 0, the result will be less than 2^n.
1395         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1396           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1397         unsigned LowBits = Log2_32(PossibleLZ)+1;
1398         Known.Zero.setBitsFrom(LowBits);
1399         break;
1400       }
1401       case Intrinsic::cttz: {
1402         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1403         // If we have a known 1, its position is our upper bound.
1404         unsigned PossibleTZ = Known2.One.countTrailingZeros();
1405         // If this call is undefined for 0, the result will be less than 2^n.
1406         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1407           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1408         unsigned LowBits = Log2_32(PossibleTZ)+1;
1409         Known.Zero.setBitsFrom(LowBits);
1410         break;
1411       }
1412       case Intrinsic::ctpop: {
1413         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1414         // We can bound the space the count needs.  Also, bits known to be zero
1415         // can't contribute to the population.
1416         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1417         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1418         Known.Zero.setBitsFrom(LowBits);
1419         // TODO: we could bound KnownOne using the lower bound on the number
1420         // of bits which might be set provided by popcnt KnownOne2.
1421         break;
1422       }
1423       case Intrinsic::x86_sse42_crc32_64_64:
1424         Known.Zero.setBitsFrom(32);
1425         break;
1426       }
1427     }
1428     break;
1429   case Instruction::ExtractElement:
1430     // Look through extract element. At the moment we keep this simple and skip
1431     // tracking the specific element. But at least we might find information
1432     // valid for all elements of the vector (for example if vector is sign
1433     // extended, shifted, etc).
1434     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1435     break;
1436   case Instruction::ExtractValue:
1437     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1438       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1439       if (EVI->getNumIndices() != 1) break;
1440       if (EVI->getIndices()[0] == 0) {
1441         switch (II->getIntrinsicID()) {
1442         default: break;
1443         case Intrinsic::uadd_with_overflow:
1444         case Intrinsic::sadd_with_overflow:
1445           computeKnownBitsAddSub(true, II->getArgOperand(0),
1446                                  II->getArgOperand(1), false, Known, Known2,
1447                                  Depth, Q);
1448           break;
1449         case Intrinsic::usub_with_overflow:
1450         case Intrinsic::ssub_with_overflow:
1451           computeKnownBitsAddSub(false, II->getArgOperand(0),
1452                                  II->getArgOperand(1), false, Known, Known2,
1453                                  Depth, Q);
1454           break;
1455         case Intrinsic::umul_with_overflow:
1456         case Intrinsic::smul_with_overflow:
1457           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1458                               Known, Known2, Depth, Q);
1459           break;
1460         }
1461       }
1462     }
1463   }
1464 }
1465 
1466 /// Determine which bits of V are known to be either zero or one and return
1467 /// them.
1468 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1469   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1470   computeKnownBits(V, Known, Depth, Q);
1471   return Known;
1472 }
1473 
1474 /// Determine which bits of V are known to be either zero or one and return
1475 /// them in the Known bit set.
1476 ///
1477 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1478 /// we cannot optimize based on the assumption that it is zero without changing
1479 /// it to be an explicit zero.  If we don't change it to zero, other code could
1480 /// optimized based on the contradictory assumption that it is non-zero.
1481 /// Because instcombine aggressively folds operations with undef args anyway,
1482 /// this won't lose us code quality.
1483 ///
1484 /// This function is defined on values with integer type, values with pointer
1485 /// type, and vectors of integers.  In the case
1486 /// where V is a vector, known zero, and known one values are the
1487 /// same width as the vector element, and the bit is set only if it is true
1488 /// for all of the elements in the vector.
1489 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1490                       const Query &Q) {
1491   assert(V && "No Value?");
1492   assert(Depth <= MaxDepth && "Limit Search Depth");
1493   unsigned BitWidth = Known.getBitWidth();
1494 
1495   assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1496           V->getType()->isPtrOrPtrVectorTy()) &&
1497          "Not integer or pointer type!");
1498   assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
1499          "V and Known should have same BitWidth");
1500   (void)BitWidth;
1501 
1502   const APInt *C;
1503   if (match(V, m_APInt(C))) {
1504     // We know all of the bits for a scalar constant or a splat vector constant!
1505     Known.One = *C;
1506     Known.Zero = ~Known.One;
1507     return;
1508   }
1509   // Null and aggregate-zero are all-zeros.
1510   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1511     Known.setAllZero();
1512     return;
1513   }
1514   // Handle a constant vector by taking the intersection of the known bits of
1515   // each element.
1516   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1517     // We know that CDS must be a vector of integers. Take the intersection of
1518     // each element.
1519     Known.Zero.setAllBits(); Known.One.setAllBits();
1520     APInt Elt(BitWidth, 0);
1521     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1522       Elt = CDS->getElementAsInteger(i);
1523       Known.Zero &= ~Elt;
1524       Known.One &= Elt;
1525     }
1526     return;
1527   }
1528 
1529   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1530     // We know that CV must be a vector of integers. Take the intersection of
1531     // each element.
1532     Known.Zero.setAllBits(); Known.One.setAllBits();
1533     APInt Elt(BitWidth, 0);
1534     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1535       Constant *Element = CV->getAggregateElement(i);
1536       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1537       if (!ElementCI) {
1538         Known.resetAll();
1539         return;
1540       }
1541       Elt = ElementCI->getValue();
1542       Known.Zero &= ~Elt;
1543       Known.One &= Elt;
1544     }
1545     return;
1546   }
1547 
1548   // Start out not knowing anything.
1549   Known.resetAll();
1550 
1551   // We can't imply anything about undefs.
1552   if (isa<UndefValue>(V))
1553     return;
1554 
1555   // There's no point in looking through other users of ConstantData for
1556   // assumptions.  Confirm that we've handled them all.
1557   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1558 
1559   // Limit search depth.
1560   // All recursive calls that increase depth must come after this.
1561   if (Depth == MaxDepth)
1562     return;
1563 
1564   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1565   // the bits of its aliasee.
1566   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1567     if (!GA->isInterposable())
1568       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1569     return;
1570   }
1571 
1572   if (const Operator *I = dyn_cast<Operator>(V))
1573     computeKnownBitsFromOperator(I, Known, Depth, Q);
1574 
1575   // Aligned pointers have trailing zeros - refine Known.Zero set
1576   if (V->getType()->isPointerTy()) {
1577     unsigned Align = V->getPointerAlignment(Q.DL);
1578     if (Align)
1579       Known.Zero.setLowBits(countTrailingZeros(Align));
1580   }
1581 
1582   // computeKnownBitsFromAssume strictly refines Known.
1583   // Therefore, we run them after computeKnownBitsFromOperator.
1584 
1585   // Check whether a nearby assume intrinsic can determine some known bits.
1586   computeKnownBitsFromAssume(V, Known, Depth, Q);
1587 
1588   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1589 }
1590 
1591 /// Return true if the given value is known to have exactly one
1592 /// bit set when defined. For vectors return true if every element is known to
1593 /// be a power of two when defined. Supports values with integer or pointer
1594 /// types and vectors of integers.
1595 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1596                             const Query &Q) {
1597   assert(Depth <= MaxDepth && "Limit Search Depth");
1598 
1599   if (const Constant *C = dyn_cast<Constant>(V)) {
1600     if (C->isNullValue())
1601       return OrZero;
1602 
1603     const APInt *ConstIntOrConstSplatInt;
1604     if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1605       return ConstIntOrConstSplatInt->isPowerOf2();
1606   }
1607 
1608   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1609   // it is shifted off the end then the result is undefined.
1610   if (match(V, m_Shl(m_One(), m_Value())))
1611     return true;
1612 
1613   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1614   // the bottom.  If it is shifted off the bottom then the result is undefined.
1615   if (match(V, m_LShr(m_SignMask(), m_Value())))
1616     return true;
1617 
1618   // The remaining tests are all recursive, so bail out if we hit the limit.
1619   if (Depth++ == MaxDepth)
1620     return false;
1621 
1622   Value *X = nullptr, *Y = nullptr;
1623   // A shift left or a logical shift right of a power of two is a power of two
1624   // or zero.
1625   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1626                  match(V, m_LShr(m_Value(X), m_Value()))))
1627     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1628 
1629   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1630     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1631 
1632   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1633     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1634            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1635 
1636   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1637     // A power of two and'd with anything is a power of two or zero.
1638     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1639         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1640       return true;
1641     // X & (-X) is always a power of two or zero.
1642     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1643       return true;
1644     return false;
1645   }
1646 
1647   // Adding a power-of-two or zero to the same power-of-two or zero yields
1648   // either the original power-of-two, a larger power-of-two or zero.
1649   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1650     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1651     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1652       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1653           match(X, m_And(m_Value(), m_Specific(Y))))
1654         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1655           return true;
1656       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1657           match(Y, m_And(m_Value(), m_Specific(X))))
1658         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1659           return true;
1660 
1661       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1662       KnownBits LHSBits(BitWidth);
1663       computeKnownBits(X, LHSBits, Depth, Q);
1664 
1665       KnownBits RHSBits(BitWidth);
1666       computeKnownBits(Y, RHSBits, Depth, Q);
1667       // If i8 V is a power of two or zero:
1668       //  ZeroBits: 1 1 1 0 1 1 1 1
1669       // ~ZeroBits: 0 0 0 1 0 0 0 0
1670       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1671         // If OrZero isn't set, we cannot give back a zero result.
1672         // Make sure either the LHS or RHS has a bit set.
1673         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1674           return true;
1675     }
1676   }
1677 
1678   // An exact divide or right shift can only shift off zero bits, so the result
1679   // is a power of two only if the first operand is a power of two and not
1680   // copying a sign bit (sdiv int_min, 2).
1681   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1682       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1683     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1684                                   Depth, Q);
1685   }
1686 
1687   return false;
1688 }
1689 
1690 /// \brief Test whether a GEP's result is known to be non-null.
1691 ///
1692 /// Uses properties inherent in a GEP to try to determine whether it is known
1693 /// to be non-null.
1694 ///
1695 /// Currently this routine does not support vector GEPs.
1696 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1697                               const Query &Q) {
1698   if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1699     return false;
1700 
1701   // FIXME: Support vector-GEPs.
1702   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1703 
1704   // If the base pointer is non-null, we cannot walk to a null address with an
1705   // inbounds GEP in address space zero.
1706   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1707     return true;
1708 
1709   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1710   // If so, then the GEP cannot produce a null pointer, as doing so would
1711   // inherently violate the inbounds contract within address space zero.
1712   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1713        GTI != GTE; ++GTI) {
1714     // Struct types are easy -- they must always be indexed by a constant.
1715     if (StructType *STy = GTI.getStructTypeOrNull()) {
1716       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1717       unsigned ElementIdx = OpC->getZExtValue();
1718       const StructLayout *SL = Q.DL.getStructLayout(STy);
1719       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1720       if (ElementOffset > 0)
1721         return true;
1722       continue;
1723     }
1724 
1725     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1726     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1727       continue;
1728 
1729     // Fast path the constant operand case both for efficiency and so we don't
1730     // increment Depth when just zipping down an all-constant GEP.
1731     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1732       if (!OpC->isZero())
1733         return true;
1734       continue;
1735     }
1736 
1737     // We post-increment Depth here because while isKnownNonZero increments it
1738     // as well, when we pop back up that increment won't persist. We don't want
1739     // to recurse 10k times just because we have 10k GEP operands. We don't
1740     // bail completely out because we want to handle constant GEPs regardless
1741     // of depth.
1742     if (Depth++ >= MaxDepth)
1743       continue;
1744 
1745     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1746       return true;
1747   }
1748 
1749   return false;
1750 }
1751 
1752 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1753 /// ensure that the value it's attached to is never Value?  'RangeType' is
1754 /// is the type of the value described by the range.
1755 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1756   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1757   assert(NumRanges >= 1);
1758   for (unsigned i = 0; i < NumRanges; ++i) {
1759     ConstantInt *Lower =
1760         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1761     ConstantInt *Upper =
1762         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1763     ConstantRange Range(Lower->getValue(), Upper->getValue());
1764     if (Range.contains(Value))
1765       return false;
1766   }
1767   return true;
1768 }
1769 
1770 /// Return true if the given value is known to be non-zero when defined. For
1771 /// vectors, return true if every element is known to be non-zero when
1772 /// defined. For pointers, if the context instruction and dominator tree are
1773 /// specified, perform context-sensitive analysis and return true if the
1774 /// pointer couldn't possibly be null at the specified instruction.
1775 /// Supports values with integer or pointer type and vectors of integers.
1776 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1777   if (auto *C = dyn_cast<Constant>(V)) {
1778     if (C->isNullValue())
1779       return false;
1780     if (isa<ConstantInt>(C))
1781       // Must be non-zero due to null test above.
1782       return true;
1783 
1784     // For constant vectors, check that all elements are undefined or known
1785     // non-zero to determine that the whole vector is known non-zero.
1786     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1787       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1788         Constant *Elt = C->getAggregateElement(i);
1789         if (!Elt || Elt->isNullValue())
1790           return false;
1791         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1792           return false;
1793       }
1794       return true;
1795     }
1796 
1797     return false;
1798   }
1799 
1800   if (auto *I = dyn_cast<Instruction>(V)) {
1801     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1802       // If the possible ranges don't contain zero, then the value is
1803       // definitely non-zero.
1804       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1805         const APInt ZeroValue(Ty->getBitWidth(), 0);
1806         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1807           return true;
1808       }
1809     }
1810   }
1811 
1812   // The remaining tests are all recursive, so bail out if we hit the limit.
1813   if (Depth++ >= MaxDepth)
1814     return false;
1815 
1816   // Check for pointer simplifications.
1817   if (V->getType()->isPointerTy()) {
1818     if (isKnownNonNullAt(V, Q.CxtI, Q.DT))
1819       return true;
1820     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1821       if (isGEPKnownNonNull(GEP, Depth, Q))
1822         return true;
1823   }
1824 
1825   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1826 
1827   // X | Y != 0 if X != 0 or Y != 0.
1828   Value *X = nullptr, *Y = nullptr;
1829   if (match(V, m_Or(m_Value(X), m_Value(Y))))
1830     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1831 
1832   // ext X != 0 if X != 0.
1833   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1834     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1835 
1836   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
1837   // if the lowest bit is shifted off the end.
1838   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1839     // shl nuw can't remove any non-zero bits.
1840     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1841     if (BO->hasNoUnsignedWrap())
1842       return isKnownNonZero(X, Depth, Q);
1843 
1844     KnownBits Known(BitWidth);
1845     computeKnownBits(X, Known, Depth, Q);
1846     if (Known.One[0])
1847       return true;
1848   }
1849   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
1850   // defined if the sign bit is shifted off the end.
1851   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1852     // shr exact can only shift out zero bits.
1853     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1854     if (BO->isExact())
1855       return isKnownNonZero(X, Depth, Q);
1856 
1857     KnownBits Known = computeKnownBits(X, Depth, Q);
1858     if (Known.isNegative())
1859       return true;
1860 
1861     // If the shifter operand is a constant, and all of the bits shifted
1862     // out are known to be zero, and X is known non-zero then at least one
1863     // non-zero bit must remain.
1864     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1865       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1866       // Is there a known one in the portion not shifted out?
1867       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
1868         return true;
1869       // Are all the bits to be shifted out known zero?
1870       if (Known.countMinTrailingZeros() >= ShiftVal)
1871         return isKnownNonZero(X, Depth, Q);
1872     }
1873   }
1874   // div exact can only produce a zero if the dividend is zero.
1875   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1876     return isKnownNonZero(X, Depth, Q);
1877   }
1878   // X + Y.
1879   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1880     KnownBits XKnown = computeKnownBits(X, Depth, Q);
1881     KnownBits YKnown = computeKnownBits(Y, Depth, Q);
1882 
1883     // If X and Y are both non-negative (as signed values) then their sum is not
1884     // zero unless both X and Y are zero.
1885     if (XKnown.isNonNegative() && YKnown.isNonNegative())
1886       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
1887         return true;
1888 
1889     // If X and Y are both negative (as signed values) then their sum is not
1890     // zero unless both X and Y equal INT_MIN.
1891     if (XKnown.isNegative() && YKnown.isNegative()) {
1892       APInt Mask = APInt::getSignedMaxValue(BitWidth);
1893       // The sign bit of X is set.  If some other bit is set then X is not equal
1894       // to INT_MIN.
1895       if (XKnown.One.intersects(Mask))
1896         return true;
1897       // The sign bit of Y is set.  If some other bit is set then Y is not equal
1898       // to INT_MIN.
1899       if (YKnown.One.intersects(Mask))
1900         return true;
1901     }
1902 
1903     // The sum of a non-negative number and a power of two is not zero.
1904     if (XKnown.isNonNegative() &&
1905         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
1906       return true;
1907     if (YKnown.isNonNegative() &&
1908         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
1909       return true;
1910   }
1911   // X * Y.
1912   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1913     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1914     // If X and Y are non-zero then so is X * Y as long as the multiplication
1915     // does not overflow.
1916     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1917         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
1918       return true;
1919   }
1920   // (C ? X : Y) != 0 if X != 0 and Y != 0.
1921   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
1922     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
1923         isKnownNonZero(SI->getFalseValue(), Depth, Q))
1924       return true;
1925   }
1926   // PHI
1927   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
1928     // Try and detect a recurrence that monotonically increases from a
1929     // starting value, as these are common as induction variables.
1930     if (PN->getNumIncomingValues() == 2) {
1931       Value *Start = PN->getIncomingValue(0);
1932       Value *Induction = PN->getIncomingValue(1);
1933       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
1934         std::swap(Start, Induction);
1935       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
1936         if (!C->isZero() && !C->isNegative()) {
1937           ConstantInt *X;
1938           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
1939                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
1940               !X->isNegative())
1941             return true;
1942         }
1943       }
1944     }
1945     // Check if all incoming values are non-zero constant.
1946     bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
1947       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
1948     });
1949     if (AllNonZeroConstants)
1950       return true;
1951   }
1952 
1953   KnownBits Known(BitWidth);
1954   computeKnownBits(V, Known, Depth, Q);
1955   return Known.One != 0;
1956 }
1957 
1958 /// Return true if V2 == V1 + X, where X is known non-zero.
1959 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
1960   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
1961   if (!BO || BO->getOpcode() != Instruction::Add)
1962     return false;
1963   Value *Op = nullptr;
1964   if (V2 == BO->getOperand(0))
1965     Op = BO->getOperand(1);
1966   else if (V2 == BO->getOperand(1))
1967     Op = BO->getOperand(0);
1968   else
1969     return false;
1970   return isKnownNonZero(Op, 0, Q);
1971 }
1972 
1973 /// Return true if it is known that V1 != V2.
1974 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
1975   if (V1 == V2)
1976     return false;
1977   if (V1->getType() != V2->getType())
1978     // We can't look through casts yet.
1979     return false;
1980   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
1981     return true;
1982 
1983   if (V1->getType()->isIntOrIntVectorTy()) {
1984     // Are any known bits in V1 contradictory to known bits in V2? If V1
1985     // has a known zero where V2 has a known one, they must not be equal.
1986     KnownBits Known1 = computeKnownBits(V1, 0, Q);
1987     KnownBits Known2 = computeKnownBits(V2, 0, Q);
1988 
1989     if (Known1.Zero.intersects(Known2.One) ||
1990         Known2.Zero.intersects(Known1.One))
1991       return true;
1992   }
1993   return false;
1994 }
1995 
1996 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
1997 /// simplify operations downstream. Mask is known to be zero for bits that V
1998 /// cannot have.
1999 ///
2000 /// This function is defined on values with integer type, values with pointer
2001 /// type, and vectors of integers.  In the case
2002 /// where V is a vector, the mask, known zero, and known one values are the
2003 /// same width as the vector element, and the bit is set only if it is true
2004 /// for all of the elements in the vector.
2005 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2006                        const Query &Q) {
2007   KnownBits Known(Mask.getBitWidth());
2008   computeKnownBits(V, Known, Depth, Q);
2009   return Mask.isSubsetOf(Known.Zero);
2010 }
2011 
2012 /// For vector constants, loop over the elements and find the constant with the
2013 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2014 /// or if any element was not analyzed; otherwise, return the count for the
2015 /// element with the minimum number of sign bits.
2016 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2017                                                  unsigned TyBits) {
2018   const auto *CV = dyn_cast<Constant>(V);
2019   if (!CV || !CV->getType()->isVectorTy())
2020     return 0;
2021 
2022   unsigned MinSignBits = TyBits;
2023   unsigned NumElts = CV->getType()->getVectorNumElements();
2024   for (unsigned i = 0; i != NumElts; ++i) {
2025     // If we find a non-ConstantInt, bail out.
2026     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2027     if (!Elt)
2028       return 0;
2029 
2030     // If the sign bit is 1, flip the bits, so we always count leading zeros.
2031     APInt EltVal = Elt->getValue();
2032     if (EltVal.isNegative())
2033       EltVal = ~EltVal;
2034     MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
2035   }
2036 
2037   return MinSignBits;
2038 }
2039 
2040 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2041                                        const Query &Q);
2042 
2043 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2044                                    const Query &Q) {
2045   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2046   assert(Result > 0 && "At least one sign bit needs to be present!");
2047   return Result;
2048 }
2049 
2050 /// Return the number of times the sign bit of the register is replicated into
2051 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2052 /// (itself), but other cases can give us information. For example, immediately
2053 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2054 /// other, so we return 3. For vectors, return the number of sign bits for the
2055 /// vector element with the mininum number of known sign bits.
2056 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2057                                        const Query &Q) {
2058   assert(Depth <= MaxDepth && "Limit Search Depth");
2059 
2060   // We return the minimum number of sign bits that are guaranteed to be present
2061   // in V, so for undef we have to conservatively return 1.  We don't have the
2062   // same behavior for poison though -- that's a FIXME today.
2063 
2064   unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2065   unsigned Tmp, Tmp2;
2066   unsigned FirstAnswer = 1;
2067 
2068   // Note that ConstantInt is handled by the general computeKnownBits case
2069   // below.
2070 
2071   if (Depth == MaxDepth)
2072     return 1;  // Limit search depth.
2073 
2074   const Operator *U = dyn_cast<Operator>(V);
2075   switch (Operator::getOpcode(V)) {
2076   default: break;
2077   case Instruction::SExt:
2078     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2079     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2080 
2081   case Instruction::SDiv: {
2082     const APInt *Denominator;
2083     // sdiv X, C -> adds log(C) sign bits.
2084     if (match(U->getOperand(1), m_APInt(Denominator))) {
2085 
2086       // Ignore non-positive denominator.
2087       if (!Denominator->isStrictlyPositive())
2088         break;
2089 
2090       // Calculate the incoming numerator bits.
2091       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2092 
2093       // Add floor(log(C)) bits to the numerator bits.
2094       return std::min(TyBits, NumBits + Denominator->logBase2());
2095     }
2096     break;
2097   }
2098 
2099   case Instruction::SRem: {
2100     const APInt *Denominator;
2101     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2102     // positive constant.  This let us put a lower bound on the number of sign
2103     // bits.
2104     if (match(U->getOperand(1), m_APInt(Denominator))) {
2105 
2106       // Ignore non-positive denominator.
2107       if (!Denominator->isStrictlyPositive())
2108         break;
2109 
2110       // Calculate the incoming numerator bits. SRem by a positive constant
2111       // can't lower the number of sign bits.
2112       unsigned NumrBits =
2113           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2114 
2115       // Calculate the leading sign bit constraints by examining the
2116       // denominator.  Given that the denominator is positive, there are two
2117       // cases:
2118       //
2119       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2120       //     (1 << ceilLogBase2(C)).
2121       //
2122       //  2. the numerator is negative.  Then the result range is (-C,0] and
2123       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2124       //
2125       // Thus a lower bound on the number of sign bits is `TyBits -
2126       // ceilLogBase2(C)`.
2127 
2128       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2129       return std::max(NumrBits, ResBits);
2130     }
2131     break;
2132   }
2133 
2134   case Instruction::AShr: {
2135     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2136     // ashr X, C   -> adds C sign bits.  Vectors too.
2137     const APInt *ShAmt;
2138     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2139       unsigned ShAmtLimited = ShAmt->getZExtValue();
2140       if (ShAmtLimited >= TyBits)
2141         break;  // Bad shift.
2142       Tmp += ShAmtLimited;
2143       if (Tmp > TyBits) Tmp = TyBits;
2144     }
2145     return Tmp;
2146   }
2147   case Instruction::Shl: {
2148     const APInt *ShAmt;
2149     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2150       // shl destroys sign bits.
2151       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2152       Tmp2 = ShAmt->getZExtValue();
2153       if (Tmp2 >= TyBits ||      // Bad shift.
2154           Tmp2 >= Tmp) break;    // Shifted all sign bits out.
2155       return Tmp - Tmp2;
2156     }
2157     break;
2158   }
2159   case Instruction::And:
2160   case Instruction::Or:
2161   case Instruction::Xor:    // NOT is handled here.
2162     // Logical binary ops preserve the number of sign bits at the worst.
2163     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2164     if (Tmp != 1) {
2165       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2166       FirstAnswer = std::min(Tmp, Tmp2);
2167       // We computed what we know about the sign bits as our first
2168       // answer. Now proceed to the generic code that uses
2169       // computeKnownBits, and pick whichever answer is better.
2170     }
2171     break;
2172 
2173   case Instruction::Select:
2174     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2175     if (Tmp == 1) return 1;  // Early out.
2176     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2177     return std::min(Tmp, Tmp2);
2178 
2179   case Instruction::Add:
2180     // Add can have at most one carry bit.  Thus we know that the output
2181     // is, at worst, one more bit than the inputs.
2182     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2183     if (Tmp == 1) return 1;  // Early out.
2184 
2185     // Special case decrementing a value (ADD X, -1):
2186     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2187       if (CRHS->isAllOnesValue()) {
2188         KnownBits Known(TyBits);
2189         computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2190 
2191         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2192         // sign bits set.
2193         if ((Known.Zero | 1).isAllOnesValue())
2194           return TyBits;
2195 
2196         // If we are subtracting one from a positive number, there is no carry
2197         // out of the result.
2198         if (Known.isNonNegative())
2199           return Tmp;
2200       }
2201 
2202     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2203     if (Tmp2 == 1) return 1;
2204     return std::min(Tmp, Tmp2)-1;
2205 
2206   case Instruction::Sub:
2207     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2208     if (Tmp2 == 1) return 1;
2209 
2210     // Handle NEG.
2211     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2212       if (CLHS->isNullValue()) {
2213         KnownBits Known(TyBits);
2214         computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2215         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2216         // sign bits set.
2217         if ((Known.Zero | 1).isAllOnesValue())
2218           return TyBits;
2219 
2220         // If the input is known to be positive (the sign bit is known clear),
2221         // the output of the NEG has the same number of sign bits as the input.
2222         if (Known.isNonNegative())
2223           return Tmp2;
2224 
2225         // Otherwise, we treat this like a SUB.
2226       }
2227 
2228     // Sub can have at most one carry bit.  Thus we know that the output
2229     // is, at worst, one more bit than the inputs.
2230     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2231     if (Tmp == 1) return 1;  // Early out.
2232     return std::min(Tmp, Tmp2)-1;
2233 
2234   case Instruction::Mul: {
2235     // The output of the Mul can be at most twice the valid bits in the inputs.
2236     unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2237     if (SignBitsOp0 == 1) return 1;  // Early out.
2238     unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2239     if (SignBitsOp1 == 1) return 1;
2240     unsigned OutValidBits =
2241         (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2242     return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2243   }
2244 
2245   case Instruction::PHI: {
2246     const PHINode *PN = cast<PHINode>(U);
2247     unsigned NumIncomingValues = PN->getNumIncomingValues();
2248     // Don't analyze large in-degree PHIs.
2249     if (NumIncomingValues > 4) break;
2250     // Unreachable blocks may have zero-operand PHI nodes.
2251     if (NumIncomingValues == 0) break;
2252 
2253     // Take the minimum of all incoming values.  This can't infinitely loop
2254     // because of our depth threshold.
2255     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2256     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2257       if (Tmp == 1) return Tmp;
2258       Tmp = std::min(
2259           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2260     }
2261     return Tmp;
2262   }
2263 
2264   case Instruction::Trunc:
2265     // FIXME: it's tricky to do anything useful for this, but it is an important
2266     // case for targets like X86.
2267     break;
2268 
2269   case Instruction::ExtractElement:
2270     // Look through extract element. At the moment we keep this simple and skip
2271     // tracking the specific element. But at least we might find information
2272     // valid for all elements of the vector (for example if vector is sign
2273     // extended, shifted, etc).
2274     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2275   }
2276 
2277   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2278   // use this information.
2279 
2280   // If we can examine all elements of a vector constant successfully, we're
2281   // done (we can't do any better than that). If not, keep trying.
2282   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2283     return VecSignBits;
2284 
2285   KnownBits Known(TyBits);
2286   computeKnownBits(V, Known, Depth, Q);
2287 
2288   // If we know that the sign bit is either zero or one, determine the number of
2289   // identical bits in the top of the input value.
2290   return std::max(FirstAnswer, Known.countMinSignBits());
2291 }
2292 
2293 /// This function computes the integer multiple of Base that equals V.
2294 /// If successful, it returns true and returns the multiple in
2295 /// Multiple. If unsuccessful, it returns false. It looks
2296 /// through SExt instructions only if LookThroughSExt is true.
2297 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2298                            bool LookThroughSExt, unsigned Depth) {
2299   const unsigned MaxDepth = 6;
2300 
2301   assert(V && "No Value?");
2302   assert(Depth <= MaxDepth && "Limit Search Depth");
2303   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2304 
2305   Type *T = V->getType();
2306 
2307   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2308 
2309   if (Base == 0)
2310     return false;
2311 
2312   if (Base == 1) {
2313     Multiple = V;
2314     return true;
2315   }
2316 
2317   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2318   Constant *BaseVal = ConstantInt::get(T, Base);
2319   if (CO && CO == BaseVal) {
2320     // Multiple is 1.
2321     Multiple = ConstantInt::get(T, 1);
2322     return true;
2323   }
2324 
2325   if (CI && CI->getZExtValue() % Base == 0) {
2326     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2327     return true;
2328   }
2329 
2330   if (Depth == MaxDepth) return false;  // Limit search depth.
2331 
2332   Operator *I = dyn_cast<Operator>(V);
2333   if (!I) return false;
2334 
2335   switch (I->getOpcode()) {
2336   default: break;
2337   case Instruction::SExt:
2338     if (!LookThroughSExt) return false;
2339     // otherwise fall through to ZExt
2340     LLVM_FALLTHROUGH;
2341   case Instruction::ZExt:
2342     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2343                            LookThroughSExt, Depth+1);
2344   case Instruction::Shl:
2345   case Instruction::Mul: {
2346     Value *Op0 = I->getOperand(0);
2347     Value *Op1 = I->getOperand(1);
2348 
2349     if (I->getOpcode() == Instruction::Shl) {
2350       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2351       if (!Op1CI) return false;
2352       // Turn Op0 << Op1 into Op0 * 2^Op1
2353       APInt Op1Int = Op1CI->getValue();
2354       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2355       APInt API(Op1Int.getBitWidth(), 0);
2356       API.setBit(BitToSet);
2357       Op1 = ConstantInt::get(V->getContext(), API);
2358     }
2359 
2360     Value *Mul0 = nullptr;
2361     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2362       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2363         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2364           if (Op1C->getType()->getPrimitiveSizeInBits() <
2365               MulC->getType()->getPrimitiveSizeInBits())
2366             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2367           if (Op1C->getType()->getPrimitiveSizeInBits() >
2368               MulC->getType()->getPrimitiveSizeInBits())
2369             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2370 
2371           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2372           Multiple = ConstantExpr::getMul(MulC, Op1C);
2373           return true;
2374         }
2375 
2376       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2377         if (Mul0CI->getValue() == 1) {
2378           // V == Base * Op1, so return Op1
2379           Multiple = Op1;
2380           return true;
2381         }
2382     }
2383 
2384     Value *Mul1 = nullptr;
2385     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2386       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2387         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2388           if (Op0C->getType()->getPrimitiveSizeInBits() <
2389               MulC->getType()->getPrimitiveSizeInBits())
2390             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2391           if (Op0C->getType()->getPrimitiveSizeInBits() >
2392               MulC->getType()->getPrimitiveSizeInBits())
2393             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2394 
2395           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2396           Multiple = ConstantExpr::getMul(MulC, Op0C);
2397           return true;
2398         }
2399 
2400       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2401         if (Mul1CI->getValue() == 1) {
2402           // V == Base * Op0, so return Op0
2403           Multiple = Op0;
2404           return true;
2405         }
2406     }
2407   }
2408   }
2409 
2410   // We could not determine if V is a multiple of Base.
2411   return false;
2412 }
2413 
2414 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2415                                             const TargetLibraryInfo *TLI) {
2416   const Function *F = ICS.getCalledFunction();
2417   if (!F)
2418     return Intrinsic::not_intrinsic;
2419 
2420   if (F->isIntrinsic())
2421     return F->getIntrinsicID();
2422 
2423   if (!TLI)
2424     return Intrinsic::not_intrinsic;
2425 
2426   LibFunc Func;
2427   // We're going to make assumptions on the semantics of the functions, check
2428   // that the target knows that it's available in this environment and it does
2429   // not have local linkage.
2430   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2431     return Intrinsic::not_intrinsic;
2432 
2433   if (!ICS.onlyReadsMemory())
2434     return Intrinsic::not_intrinsic;
2435 
2436   // Otherwise check if we have a call to a function that can be turned into a
2437   // vector intrinsic.
2438   switch (Func) {
2439   default:
2440     break;
2441   case LibFunc_sin:
2442   case LibFunc_sinf:
2443   case LibFunc_sinl:
2444     return Intrinsic::sin;
2445   case LibFunc_cos:
2446   case LibFunc_cosf:
2447   case LibFunc_cosl:
2448     return Intrinsic::cos;
2449   case LibFunc_exp:
2450   case LibFunc_expf:
2451   case LibFunc_expl:
2452     return Intrinsic::exp;
2453   case LibFunc_exp2:
2454   case LibFunc_exp2f:
2455   case LibFunc_exp2l:
2456     return Intrinsic::exp2;
2457   case LibFunc_log:
2458   case LibFunc_logf:
2459   case LibFunc_logl:
2460     return Intrinsic::log;
2461   case LibFunc_log10:
2462   case LibFunc_log10f:
2463   case LibFunc_log10l:
2464     return Intrinsic::log10;
2465   case LibFunc_log2:
2466   case LibFunc_log2f:
2467   case LibFunc_log2l:
2468     return Intrinsic::log2;
2469   case LibFunc_fabs:
2470   case LibFunc_fabsf:
2471   case LibFunc_fabsl:
2472     return Intrinsic::fabs;
2473   case LibFunc_fmin:
2474   case LibFunc_fminf:
2475   case LibFunc_fminl:
2476     return Intrinsic::minnum;
2477   case LibFunc_fmax:
2478   case LibFunc_fmaxf:
2479   case LibFunc_fmaxl:
2480     return Intrinsic::maxnum;
2481   case LibFunc_copysign:
2482   case LibFunc_copysignf:
2483   case LibFunc_copysignl:
2484     return Intrinsic::copysign;
2485   case LibFunc_floor:
2486   case LibFunc_floorf:
2487   case LibFunc_floorl:
2488     return Intrinsic::floor;
2489   case LibFunc_ceil:
2490   case LibFunc_ceilf:
2491   case LibFunc_ceill:
2492     return Intrinsic::ceil;
2493   case LibFunc_trunc:
2494   case LibFunc_truncf:
2495   case LibFunc_truncl:
2496     return Intrinsic::trunc;
2497   case LibFunc_rint:
2498   case LibFunc_rintf:
2499   case LibFunc_rintl:
2500     return Intrinsic::rint;
2501   case LibFunc_nearbyint:
2502   case LibFunc_nearbyintf:
2503   case LibFunc_nearbyintl:
2504     return Intrinsic::nearbyint;
2505   case LibFunc_round:
2506   case LibFunc_roundf:
2507   case LibFunc_roundl:
2508     return Intrinsic::round;
2509   case LibFunc_pow:
2510   case LibFunc_powf:
2511   case LibFunc_powl:
2512     return Intrinsic::pow;
2513   case LibFunc_sqrt:
2514   case LibFunc_sqrtf:
2515   case LibFunc_sqrtl:
2516     if (ICS->hasNoNaNs())
2517       return Intrinsic::sqrt;
2518     return Intrinsic::not_intrinsic;
2519   }
2520 
2521   return Intrinsic::not_intrinsic;
2522 }
2523 
2524 /// Return true if we can prove that the specified FP value is never equal to
2525 /// -0.0.
2526 ///
2527 /// NOTE: this function will need to be revisited when we support non-default
2528 /// rounding modes!
2529 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2530                                 unsigned Depth) {
2531   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2532     return !CFP->getValueAPF().isNegZero();
2533 
2534   if (Depth == MaxDepth)
2535     return false;  // Limit search depth.
2536 
2537   const Operator *I = dyn_cast<Operator>(V);
2538   if (!I) return false;
2539 
2540   // Check if the nsz fast-math flag is set
2541   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2542     if (FPO->hasNoSignedZeros())
2543       return true;
2544 
2545   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2546   if (I->getOpcode() == Instruction::FAdd)
2547     if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2548       if (CFP->isNullValue())
2549         return true;
2550 
2551   // sitofp and uitofp turn into +0.0 for zero.
2552   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2553     return true;
2554 
2555   if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2556     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2557     switch (IID) {
2558     default:
2559       break;
2560     // sqrt(-0.0) = -0.0, no other negative results are possible.
2561     case Intrinsic::sqrt:
2562       return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
2563     // fabs(x) != -0.0
2564     case Intrinsic::fabs:
2565       return true;
2566     }
2567   }
2568 
2569   return false;
2570 }
2571 
2572 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2573 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2574 /// bit despite comparing equal.
2575 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2576                                             const TargetLibraryInfo *TLI,
2577                                             bool SignBitOnly,
2578                                             unsigned Depth) {
2579   // TODO: This function does not do the right thing when SignBitOnly is true
2580   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2581   // which flips the sign bits of NaNs.  See
2582   // https://llvm.org/bugs/show_bug.cgi?id=31702.
2583 
2584   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2585     return !CFP->getValueAPF().isNegative() ||
2586            (!SignBitOnly && CFP->getValueAPF().isZero());
2587   }
2588 
2589   if (Depth == MaxDepth)
2590     return false; // Limit search depth.
2591 
2592   const Operator *I = dyn_cast<Operator>(V);
2593   if (!I)
2594     return false;
2595 
2596   switch (I->getOpcode()) {
2597   default:
2598     break;
2599   // Unsigned integers are always nonnegative.
2600   case Instruction::UIToFP:
2601     return true;
2602   case Instruction::FMul:
2603     // x*x is always non-negative or a NaN.
2604     if (I->getOperand(0) == I->getOperand(1) &&
2605         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2606       return true;
2607 
2608     LLVM_FALLTHROUGH;
2609   case Instruction::FAdd:
2610   case Instruction::FDiv:
2611   case Instruction::FRem:
2612     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2613                                            Depth + 1) &&
2614            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2615                                            Depth + 1);
2616   case Instruction::Select:
2617     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2618                                            Depth + 1) &&
2619            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2620                                            Depth + 1);
2621   case Instruction::FPExt:
2622   case Instruction::FPTrunc:
2623     // Widening/narrowing never change sign.
2624     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2625                                            Depth + 1);
2626   case Instruction::Call:
2627     const auto *CI = cast<CallInst>(I);
2628     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2629     switch (IID) {
2630     default:
2631       break;
2632     case Intrinsic::maxnum:
2633       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2634                                              Depth + 1) ||
2635              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2636                                              Depth + 1);
2637     case Intrinsic::minnum:
2638       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2639                                              Depth + 1) &&
2640              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2641                                              Depth + 1);
2642     case Intrinsic::exp:
2643     case Intrinsic::exp2:
2644     case Intrinsic::fabs:
2645       return true;
2646 
2647     case Intrinsic::sqrt:
2648       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
2649       if (!SignBitOnly)
2650         return true;
2651       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2652                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
2653 
2654     case Intrinsic::powi:
2655       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2656         // powi(x,n) is non-negative if n is even.
2657         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2658           return true;
2659       }
2660       // TODO: This is not correct.  Given that exp is an integer, here are the
2661       // ways that pow can return a negative value:
2662       //
2663       //   pow(x, exp)    --> negative if exp is odd and x is negative.
2664       //   pow(-0, exp)   --> -inf if exp is negative odd.
2665       //   pow(-0, exp)   --> -0 if exp is positive odd.
2666       //   pow(-inf, exp) --> -0 if exp is negative odd.
2667       //   pow(-inf, exp) --> -inf if exp is positive odd.
2668       //
2669       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2670       // but we must return false if x == -0.  Unfortunately we do not currently
2671       // have a way of expressing this constraint.  See details in
2672       // https://llvm.org/bugs/show_bug.cgi?id=31702.
2673       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2674                                              Depth + 1);
2675 
2676     case Intrinsic::fma:
2677     case Intrinsic::fmuladd:
2678       // x*x+y is non-negative if y is non-negative.
2679       return I->getOperand(0) == I->getOperand(1) &&
2680              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2681              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2682                                              Depth + 1);
2683     }
2684     break;
2685   }
2686   return false;
2687 }
2688 
2689 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2690                                        const TargetLibraryInfo *TLI) {
2691   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2692 }
2693 
2694 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2695   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2696 }
2697 
2698 bool llvm::isKnownNeverNaN(const Value *V) {
2699   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2700 
2701   // If we're told that NaNs won't happen, assume they won't.
2702   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2703     if (FPMathOp->hasNoNaNs())
2704       return true;
2705 
2706   // TODO: Handle instructions and potentially recurse like other 'isKnown'
2707   // functions. For example, the result of sitofp is never NaN.
2708 
2709   // Handle scalar constants.
2710   if (auto *CFP = dyn_cast<ConstantFP>(V))
2711     return !CFP->isNaN();
2712 
2713   // Bail out for constant expressions, but try to handle vector constants.
2714   if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2715     return false;
2716 
2717   // For vectors, verify that each element is not NaN.
2718   unsigned NumElts = V->getType()->getVectorNumElements();
2719   for (unsigned i = 0; i != NumElts; ++i) {
2720     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2721     if (!Elt)
2722       return false;
2723     if (isa<UndefValue>(Elt))
2724       continue;
2725     auto *CElt = dyn_cast<ConstantFP>(Elt);
2726     if (!CElt || CElt->isNaN())
2727       return false;
2728   }
2729   // All elements were confirmed not-NaN or undefined.
2730   return true;
2731 }
2732 
2733 /// If the specified value can be set by repeating the same byte in memory,
2734 /// return the i8 value that it is represented with.  This is
2735 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2736 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
2737 /// byte store (e.g. i16 0x1234), return null.
2738 Value *llvm::isBytewiseValue(Value *V) {
2739   // All byte-wide stores are splatable, even of arbitrary variables.
2740   if (V->getType()->isIntegerTy(8)) return V;
2741 
2742   // Handle 'null' ConstantArrayZero etc.
2743   if (Constant *C = dyn_cast<Constant>(V))
2744     if (C->isNullValue())
2745       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2746 
2747   // Constant float and double values can be handled as integer values if the
2748   // corresponding integer value is "byteable".  An important case is 0.0.
2749   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2750     if (CFP->getType()->isFloatTy())
2751       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2752     if (CFP->getType()->isDoubleTy())
2753       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2754     // Don't handle long double formats, which have strange constraints.
2755   }
2756 
2757   // We can handle constant integers that are multiple of 8 bits.
2758   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2759     if (CI->getBitWidth() % 8 == 0) {
2760       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2761 
2762       if (!CI->getValue().isSplat(8))
2763         return nullptr;
2764       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2765     }
2766   }
2767 
2768   // A ConstantDataArray/Vector is splatable if all its members are equal and
2769   // also splatable.
2770   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2771     Value *Elt = CA->getElementAsConstant(0);
2772     Value *Val = isBytewiseValue(Elt);
2773     if (!Val)
2774       return nullptr;
2775 
2776     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2777       if (CA->getElementAsConstant(I) != Elt)
2778         return nullptr;
2779 
2780     return Val;
2781   }
2782 
2783   // Conceptually, we could handle things like:
2784   //   %a = zext i8 %X to i16
2785   //   %b = shl i16 %a, 8
2786   //   %c = or i16 %a, %b
2787   // but until there is an example that actually needs this, it doesn't seem
2788   // worth worrying about.
2789   return nullptr;
2790 }
2791 
2792 // This is the recursive version of BuildSubAggregate. It takes a few different
2793 // arguments. Idxs is the index within the nested struct From that we are
2794 // looking at now (which is of type IndexedType). IdxSkip is the number of
2795 // indices from Idxs that should be left out when inserting into the resulting
2796 // struct. To is the result struct built so far, new insertvalue instructions
2797 // build on that.
2798 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2799                                 SmallVectorImpl<unsigned> &Idxs,
2800                                 unsigned IdxSkip,
2801                                 Instruction *InsertBefore) {
2802   StructType *STy = dyn_cast<StructType>(IndexedType);
2803   if (STy) {
2804     // Save the original To argument so we can modify it
2805     Value *OrigTo = To;
2806     // General case, the type indexed by Idxs is a struct
2807     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2808       // Process each struct element recursively
2809       Idxs.push_back(i);
2810       Value *PrevTo = To;
2811       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2812                              InsertBefore);
2813       Idxs.pop_back();
2814       if (!To) {
2815         // Couldn't find any inserted value for this index? Cleanup
2816         while (PrevTo != OrigTo) {
2817           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2818           PrevTo = Del->getAggregateOperand();
2819           Del->eraseFromParent();
2820         }
2821         // Stop processing elements
2822         break;
2823       }
2824     }
2825     // If we successfully found a value for each of our subaggregates
2826     if (To)
2827       return To;
2828   }
2829   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2830   // the struct's elements had a value that was inserted directly. In the latter
2831   // case, perhaps we can't determine each of the subelements individually, but
2832   // we might be able to find the complete struct somewhere.
2833 
2834   // Find the value that is at that particular spot
2835   Value *V = FindInsertedValue(From, Idxs);
2836 
2837   if (!V)
2838     return nullptr;
2839 
2840   // Insert the value in the new (sub) aggregrate
2841   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2842                                  "tmp", InsertBefore);
2843 }
2844 
2845 // This helper takes a nested struct and extracts a part of it (which is again a
2846 // struct) into a new value. For example, given the struct:
2847 // { a, { b, { c, d }, e } }
2848 // and the indices "1, 1" this returns
2849 // { c, d }.
2850 //
2851 // It does this by inserting an insertvalue for each element in the resulting
2852 // struct, as opposed to just inserting a single struct. This will only work if
2853 // each of the elements of the substruct are known (ie, inserted into From by an
2854 // insertvalue instruction somewhere).
2855 //
2856 // All inserted insertvalue instructions are inserted before InsertBefore
2857 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2858                                 Instruction *InsertBefore) {
2859   assert(InsertBefore && "Must have someplace to insert!");
2860   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2861                                                              idx_range);
2862   Value *To = UndefValue::get(IndexedType);
2863   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2864   unsigned IdxSkip = Idxs.size();
2865 
2866   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2867 }
2868 
2869 /// Given an aggregrate and an sequence of indices, see if
2870 /// the scalar value indexed is already around as a register, for example if it
2871 /// were inserted directly into the aggregrate.
2872 ///
2873 /// If InsertBefore is not null, this function will duplicate (modified)
2874 /// insertvalues when a part of a nested struct is extracted.
2875 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2876                                Instruction *InsertBefore) {
2877   // Nothing to index? Just return V then (this is useful at the end of our
2878   // recursion).
2879   if (idx_range.empty())
2880     return V;
2881   // We have indices, so V should have an indexable type.
2882   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2883          "Not looking at a struct or array?");
2884   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2885          "Invalid indices for type?");
2886 
2887   if (Constant *C = dyn_cast<Constant>(V)) {
2888     C = C->getAggregateElement(idx_range[0]);
2889     if (!C) return nullptr;
2890     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2891   }
2892 
2893   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2894     // Loop the indices for the insertvalue instruction in parallel with the
2895     // requested indices
2896     const unsigned *req_idx = idx_range.begin();
2897     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2898          i != e; ++i, ++req_idx) {
2899       if (req_idx == idx_range.end()) {
2900         // We can't handle this without inserting insertvalues
2901         if (!InsertBefore)
2902           return nullptr;
2903 
2904         // The requested index identifies a part of a nested aggregate. Handle
2905         // this specially. For example,
2906         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2907         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2908         // %C = extractvalue {i32, { i32, i32 } } %B, 1
2909         // This can be changed into
2910         // %A = insertvalue {i32, i32 } undef, i32 10, 0
2911         // %C = insertvalue {i32, i32 } %A, i32 11, 1
2912         // which allows the unused 0,0 element from the nested struct to be
2913         // removed.
2914         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2915                                  InsertBefore);
2916       }
2917 
2918       // This insert value inserts something else than what we are looking for.
2919       // See if the (aggregate) value inserted into has the value we are
2920       // looking for, then.
2921       if (*req_idx != *i)
2922         return FindInsertedValue(I->getAggregateOperand(), idx_range,
2923                                  InsertBefore);
2924     }
2925     // If we end up here, the indices of the insertvalue match with those
2926     // requested (though possibly only partially). Now we recursively look at
2927     // the inserted value, passing any remaining indices.
2928     return FindInsertedValue(I->getInsertedValueOperand(),
2929                              makeArrayRef(req_idx, idx_range.end()),
2930                              InsertBefore);
2931   }
2932 
2933   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2934     // If we're extracting a value from an aggregate that was extracted from
2935     // something else, we can extract from that something else directly instead.
2936     // However, we will need to chain I's indices with the requested indices.
2937 
2938     // Calculate the number of indices required
2939     unsigned size = I->getNumIndices() + idx_range.size();
2940     // Allocate some space to put the new indices in
2941     SmallVector<unsigned, 5> Idxs;
2942     Idxs.reserve(size);
2943     // Add indices from the extract value instruction
2944     Idxs.append(I->idx_begin(), I->idx_end());
2945 
2946     // Add requested indices
2947     Idxs.append(idx_range.begin(), idx_range.end());
2948 
2949     assert(Idxs.size() == size
2950            && "Number of indices added not correct?");
2951 
2952     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2953   }
2954   // Otherwise, we don't know (such as, extracting from a function return value
2955   // or load instruction)
2956   return nullptr;
2957 }
2958 
2959 /// Analyze the specified pointer to see if it can be expressed as a base
2960 /// pointer plus a constant offset. Return the base and offset to the caller.
2961 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2962                                               const DataLayout &DL) {
2963   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2964   APInt ByteOffset(BitWidth, 0);
2965 
2966   // We walk up the defs but use a visited set to handle unreachable code. In
2967   // that case, we stop after accumulating the cycle once (not that it
2968   // matters).
2969   SmallPtrSet<Value *, 16> Visited;
2970   while (Visited.insert(Ptr).second) {
2971     if (Ptr->getType()->isVectorTy())
2972       break;
2973 
2974     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2975       // If one of the values we have visited is an addrspacecast, then
2976       // the pointer type of this GEP may be different from the type
2977       // of the Ptr parameter which was passed to this function.  This
2978       // means when we construct GEPOffset, we need to use the size
2979       // of GEP's pointer type rather than the size of the original
2980       // pointer type.
2981       APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
2982       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2983         break;
2984 
2985       ByteOffset += GEPOffset.getSExtValue();
2986 
2987       Ptr = GEP->getPointerOperand();
2988     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2989                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2990       Ptr = cast<Operator>(Ptr)->getOperand(0);
2991     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2992       if (GA->isInterposable())
2993         break;
2994       Ptr = GA->getAliasee();
2995     } else {
2996       break;
2997     }
2998   }
2999   Offset = ByteOffset.getSExtValue();
3000   return Ptr;
3001 }
3002 
3003 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3004                                        unsigned CharSize) {
3005   // Make sure the GEP has exactly three arguments.
3006   if (GEP->getNumOperands() != 3)
3007     return false;
3008 
3009   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3010   // CharSize.
3011   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3012   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3013     return false;
3014 
3015   // Check to make sure that the first operand of the GEP is an integer and
3016   // has value 0 so that we are sure we're indexing into the initializer.
3017   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3018   if (!FirstIdx || !FirstIdx->isZero())
3019     return false;
3020 
3021   return true;
3022 }
3023 
3024 bool llvm::getConstantDataArrayInfo(const Value *V,
3025                                     ConstantDataArraySlice &Slice,
3026                                     unsigned ElementSize, uint64_t Offset) {
3027   assert(V);
3028 
3029   // Look through bitcast instructions and geps.
3030   V = V->stripPointerCasts();
3031 
3032   // If the value is a GEP instruction or constant expression, treat it as an
3033   // offset.
3034   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3035     // The GEP operator should be based on a pointer to string constant, and is
3036     // indexing into the string constant.
3037     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3038       return false;
3039 
3040     // If the second index isn't a ConstantInt, then this is a variable index
3041     // into the array.  If this occurs, we can't say anything meaningful about
3042     // the string.
3043     uint64_t StartIdx = 0;
3044     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3045       StartIdx = CI->getZExtValue();
3046     else
3047       return false;
3048     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3049                                     StartIdx + Offset);
3050   }
3051 
3052   // The GEP instruction, constant or instruction, must reference a global
3053   // variable that is a constant and is initialized. The referenced constant
3054   // initializer is the array that we'll use for optimization.
3055   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3056   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3057     return false;
3058 
3059   const ConstantDataArray *Array;
3060   ArrayType *ArrayTy;
3061   if (GV->getInitializer()->isNullValue()) {
3062     Type *GVTy = GV->getValueType();
3063     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3064       // A zeroinitializer for the array; there is no ConstantDataArray.
3065       Array = nullptr;
3066     } else {
3067       const DataLayout &DL = GV->getParent()->getDataLayout();
3068       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3069       uint64_t Length = SizeInBytes / (ElementSize / 8);
3070       if (Length <= Offset)
3071         return false;
3072 
3073       Slice.Array = nullptr;
3074       Slice.Offset = 0;
3075       Slice.Length = Length - Offset;
3076       return true;
3077     }
3078   } else {
3079     // This must be a ConstantDataArray.
3080     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3081     if (!Array)
3082       return false;
3083     ArrayTy = Array->getType();
3084   }
3085   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3086     return false;
3087 
3088   uint64_t NumElts = ArrayTy->getArrayNumElements();
3089   if (Offset > NumElts)
3090     return false;
3091 
3092   Slice.Array = Array;
3093   Slice.Offset = Offset;
3094   Slice.Length = NumElts - Offset;
3095   return true;
3096 }
3097 
3098 /// This function computes the length of a null-terminated C string pointed to
3099 /// by V. If successful, it returns true and returns the string in Str.
3100 /// If unsuccessful, it returns false.
3101 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3102                                  uint64_t Offset, bool TrimAtNul) {
3103   ConstantDataArraySlice Slice;
3104   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3105     return false;
3106 
3107   if (Slice.Array == nullptr) {
3108     if (TrimAtNul) {
3109       Str = StringRef();
3110       return true;
3111     }
3112     if (Slice.Length == 1) {
3113       Str = StringRef("", 1);
3114       return true;
3115     }
3116     // We cannot instantiate a StringRef as we do not have an appropriate string
3117     // of 0s at hand.
3118     return false;
3119   }
3120 
3121   // Start out with the entire array in the StringRef.
3122   Str = Slice.Array->getAsString();
3123   // Skip over 'offset' bytes.
3124   Str = Str.substr(Slice.Offset);
3125 
3126   if (TrimAtNul) {
3127     // Trim off the \0 and anything after it.  If the array is not nul
3128     // terminated, we just return the whole end of string.  The client may know
3129     // some other way that the string is length-bound.
3130     Str = Str.substr(0, Str.find('\0'));
3131   }
3132   return true;
3133 }
3134 
3135 // These next two are very similar to the above, but also look through PHI
3136 // nodes.
3137 // TODO: See if we can integrate these two together.
3138 
3139 /// If we can compute the length of the string pointed to by
3140 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3141 static uint64_t GetStringLengthH(const Value *V,
3142                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3143                                  unsigned CharSize) {
3144   // Look through noop bitcast instructions.
3145   V = V->stripPointerCasts();
3146 
3147   // If this is a PHI node, there are two cases: either we have already seen it
3148   // or we haven't.
3149   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3150     if (!PHIs.insert(PN).second)
3151       return ~0ULL;  // already in the set.
3152 
3153     // If it was new, see if all the input strings are the same length.
3154     uint64_t LenSoFar = ~0ULL;
3155     for (Value *IncValue : PN->incoming_values()) {
3156       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3157       if (Len == 0) return 0; // Unknown length -> unknown.
3158 
3159       if (Len == ~0ULL) continue;
3160 
3161       if (Len != LenSoFar && LenSoFar != ~0ULL)
3162         return 0;    // Disagree -> unknown.
3163       LenSoFar = Len;
3164     }
3165 
3166     // Success, all agree.
3167     return LenSoFar;
3168   }
3169 
3170   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3171   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3172     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3173     if (Len1 == 0) return 0;
3174     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3175     if (Len2 == 0) return 0;
3176     if (Len1 == ~0ULL) return Len2;
3177     if (Len2 == ~0ULL) return Len1;
3178     if (Len1 != Len2) return 0;
3179     return Len1;
3180   }
3181 
3182   // Otherwise, see if we can read the string.
3183   ConstantDataArraySlice Slice;
3184   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3185     return 0;
3186 
3187   if (Slice.Array == nullptr)
3188     return 1;
3189 
3190   // Search for nul characters
3191   unsigned NullIndex = 0;
3192   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3193     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3194       break;
3195   }
3196 
3197   return NullIndex + 1;
3198 }
3199 
3200 /// If we can compute the length of the string pointed to by
3201 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3202 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3203   if (!V->getType()->isPointerTy()) return 0;
3204 
3205   SmallPtrSet<const PHINode*, 32> PHIs;
3206   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3207   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3208   // an empty string as a length.
3209   return Len == ~0ULL ? 1 : Len;
3210 }
3211 
3212 /// \brief \p PN defines a loop-variant pointer to an object.  Check if the
3213 /// previous iteration of the loop was referring to the same object as \p PN.
3214 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3215                                          const LoopInfo *LI) {
3216   // Find the loop-defined value.
3217   Loop *L = LI->getLoopFor(PN->getParent());
3218   if (PN->getNumIncomingValues() != 2)
3219     return true;
3220 
3221   // Find the value from previous iteration.
3222   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3223   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3224     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3225   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3226     return true;
3227 
3228   // If a new pointer is loaded in the loop, the pointer references a different
3229   // object in every iteration.  E.g.:
3230   //    for (i)
3231   //       int *p = a[i];
3232   //       ...
3233   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3234     if (!L->isLoopInvariant(Load->getPointerOperand()))
3235       return false;
3236   return true;
3237 }
3238 
3239 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3240                                  unsigned MaxLookup) {
3241   if (!V->getType()->isPointerTy())
3242     return V;
3243   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3244     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3245       V = GEP->getPointerOperand();
3246     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3247                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3248       V = cast<Operator>(V)->getOperand(0);
3249     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3250       if (GA->isInterposable())
3251         return V;
3252       V = GA->getAliasee();
3253     } else if (isa<AllocaInst>(V)) {
3254       // An alloca can't be further simplified.
3255       return V;
3256     } else {
3257       if (auto CS = CallSite(V))
3258         if (Value *RV = CS.getReturnedArgOperand()) {
3259           V = RV;
3260           continue;
3261         }
3262 
3263       // See if InstructionSimplify knows any relevant tricks.
3264       if (Instruction *I = dyn_cast<Instruction>(V))
3265         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3266         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3267           V = Simplified;
3268           continue;
3269         }
3270 
3271       return V;
3272     }
3273     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3274   }
3275   return V;
3276 }
3277 
3278 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3279                                 const DataLayout &DL, LoopInfo *LI,
3280                                 unsigned MaxLookup) {
3281   SmallPtrSet<Value *, 4> Visited;
3282   SmallVector<Value *, 4> Worklist;
3283   Worklist.push_back(V);
3284   do {
3285     Value *P = Worklist.pop_back_val();
3286     P = GetUnderlyingObject(P, DL, MaxLookup);
3287 
3288     if (!Visited.insert(P).second)
3289       continue;
3290 
3291     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3292       Worklist.push_back(SI->getTrueValue());
3293       Worklist.push_back(SI->getFalseValue());
3294       continue;
3295     }
3296 
3297     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3298       // If this PHI changes the underlying object in every iteration of the
3299       // loop, don't look through it.  Consider:
3300       //   int **A;
3301       //   for (i) {
3302       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3303       //     Curr = A[i];
3304       //     *Prev, *Curr;
3305       //
3306       // Prev is tracking Curr one iteration behind so they refer to different
3307       // underlying objects.
3308       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3309           isSameUnderlyingObjectInLoop(PN, LI))
3310         for (Value *IncValue : PN->incoming_values())
3311           Worklist.push_back(IncValue);
3312       continue;
3313     }
3314 
3315     Objects.push_back(P);
3316   } while (!Worklist.empty());
3317 }
3318 
3319 /// This is the function that does the work of looking through basic
3320 /// ptrtoint+arithmetic+inttoptr sequences.
3321 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3322   do {
3323     if (const Operator *U = dyn_cast<Operator>(V)) {
3324       // If we find a ptrtoint, we can transfer control back to the
3325       // regular getUnderlyingObjectFromInt.
3326       if (U->getOpcode() == Instruction::PtrToInt)
3327         return U->getOperand(0);
3328       // If we find an add of a constant, a multiplied value, or a phi, it's
3329       // likely that the other operand will lead us to the base
3330       // object. We don't have to worry about the case where the
3331       // object address is somehow being computed by the multiply,
3332       // because our callers only care when the result is an
3333       // identifiable object.
3334       if (U->getOpcode() != Instruction::Add ||
3335           (!isa<ConstantInt>(U->getOperand(1)) &&
3336            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3337            !isa<PHINode>(U->getOperand(1))))
3338         return V;
3339       V = U->getOperand(0);
3340     } else {
3341       return V;
3342     }
3343     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3344   } while (true);
3345 }
3346 
3347 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3348 /// ptrtoint+arithmetic+inttoptr sequences.
3349 void llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3350                           SmallVectorImpl<Value *> &Objects,
3351                           const DataLayout &DL) {
3352   SmallPtrSet<const Value *, 16> Visited;
3353   SmallVector<const Value *, 4> Working(1, V);
3354   do {
3355     V = Working.pop_back_val();
3356 
3357     SmallVector<Value *, 4> Objs;
3358     GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3359 
3360     for (Value *V : Objs) {
3361       if (!Visited.insert(V).second)
3362         continue;
3363       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3364         const Value *O =
3365           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3366         if (O->getType()->isPointerTy()) {
3367           Working.push_back(O);
3368           continue;
3369         }
3370       }
3371       // If GetUnderlyingObjects fails to find an identifiable object,
3372       // getUnderlyingObjectsForCodeGen also fails for safety.
3373       if (!isIdentifiedObject(V)) {
3374         Objects.clear();
3375         return;
3376       }
3377       Objects.push_back(const_cast<Value *>(V));
3378     }
3379   } while (!Working.empty());
3380 }
3381 
3382 /// Return true if the only users of this pointer are lifetime markers.
3383 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3384   for (const User *U : V->users()) {
3385     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3386     if (!II) return false;
3387 
3388     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3389         II->getIntrinsicID() != Intrinsic::lifetime_end)
3390       return false;
3391   }
3392   return true;
3393 }
3394 
3395 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3396                                         const Instruction *CtxI,
3397                                         const DominatorTree *DT) {
3398   const Operator *Inst = dyn_cast<Operator>(V);
3399   if (!Inst)
3400     return false;
3401 
3402   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3403     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3404       if (C->canTrap())
3405         return false;
3406 
3407   switch (Inst->getOpcode()) {
3408   default:
3409     return true;
3410   case Instruction::UDiv:
3411   case Instruction::URem: {
3412     // x / y is undefined if y == 0.
3413     const APInt *V;
3414     if (match(Inst->getOperand(1), m_APInt(V)))
3415       return *V != 0;
3416     return false;
3417   }
3418   case Instruction::SDiv:
3419   case Instruction::SRem: {
3420     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3421     const APInt *Numerator, *Denominator;
3422     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3423       return false;
3424     // We cannot hoist this division if the denominator is 0.
3425     if (*Denominator == 0)
3426       return false;
3427     // It's safe to hoist if the denominator is not 0 or -1.
3428     if (*Denominator != -1)
3429       return true;
3430     // At this point we know that the denominator is -1.  It is safe to hoist as
3431     // long we know that the numerator is not INT_MIN.
3432     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3433       return !Numerator->isMinSignedValue();
3434     // The numerator *might* be MinSignedValue.
3435     return false;
3436   }
3437   case Instruction::Load: {
3438     const LoadInst *LI = cast<LoadInst>(Inst);
3439     if (!LI->isUnordered() ||
3440         // Speculative load may create a race that did not exist in the source.
3441         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3442         // Speculative load may load data from dirty regions.
3443         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
3444       return false;
3445     const DataLayout &DL = LI->getModule()->getDataLayout();
3446     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3447                                               LI->getAlignment(), DL, CtxI, DT);
3448   }
3449   case Instruction::Call: {
3450     auto *CI = cast<const CallInst>(Inst);
3451     const Function *Callee = CI->getCalledFunction();
3452 
3453     // The called function could have undefined behavior or side-effects, even
3454     // if marked readnone nounwind.
3455     return Callee && Callee->isSpeculatable();
3456   }
3457   case Instruction::VAArg:
3458   case Instruction::Alloca:
3459   case Instruction::Invoke:
3460   case Instruction::PHI:
3461   case Instruction::Store:
3462   case Instruction::Ret:
3463   case Instruction::Br:
3464   case Instruction::IndirectBr:
3465   case Instruction::Switch:
3466   case Instruction::Unreachable:
3467   case Instruction::Fence:
3468   case Instruction::AtomicRMW:
3469   case Instruction::AtomicCmpXchg:
3470   case Instruction::LandingPad:
3471   case Instruction::Resume:
3472   case Instruction::CatchSwitch:
3473   case Instruction::CatchPad:
3474   case Instruction::CatchRet:
3475   case Instruction::CleanupPad:
3476   case Instruction::CleanupRet:
3477     return false; // Misc instructions which have effects
3478   }
3479 }
3480 
3481 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3482   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3483 }
3484 
3485 /// Return true if we know that the specified value is never null.
3486 bool llvm::isKnownNonNull(const Value *V) {
3487   assert(V->getType()->isPointerTy() && "V must be pointer type");
3488 
3489   // Alloca never returns null, malloc might.
3490   if (isa<AllocaInst>(V)) return true;
3491 
3492   // A byval, inalloca, or nonnull argument is never null.
3493   if (const Argument *A = dyn_cast<Argument>(V))
3494     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3495 
3496   // A global variable in address space 0 is non null unless extern weak
3497   // or an absolute symbol reference. Other address spaces may have null as a
3498   // valid address for a global, so we can't assume anything.
3499   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3500     return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3501            GV->getType()->getAddressSpace() == 0;
3502 
3503   // A Load tagged with nonnull metadata is never null.
3504   if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3505     return LI->getMetadata(LLVMContext::MD_nonnull);
3506 
3507   if (auto CS = ImmutableCallSite(V))
3508     if (CS.isReturnNonNull())
3509       return true;
3510 
3511   return false;
3512 }
3513 
3514 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3515                                                   const Instruction *CtxI,
3516                                                   const DominatorTree *DT) {
3517   assert(V->getType()->isPointerTy() && "V must be pointer type");
3518   assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
3519   assert(CtxI && "Context instruction required for analysis");
3520   assert(DT && "Dominator tree required for analysis");
3521 
3522   unsigned NumUsesExplored = 0;
3523   for (auto *U : V->users()) {
3524     // Avoid massive lists
3525     if (NumUsesExplored >= DomConditionsMaxUses)
3526       break;
3527     NumUsesExplored++;
3528 
3529     // If the value is used as an argument to a call or invoke, then argument
3530     // attributes may provide an answer about null-ness.
3531     if (auto CS = ImmutableCallSite(U))
3532       if (auto *CalledFunc = CS.getCalledFunction())
3533         for (const Argument &Arg : CalledFunc->args())
3534           if (CS.getArgOperand(Arg.getArgNo()) == V &&
3535               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
3536             return true;
3537 
3538     // Consider only compare instructions uniquely controlling a branch
3539     CmpInst::Predicate Pred;
3540     if (!match(const_cast<User *>(U),
3541                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
3542         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
3543       continue;
3544 
3545     for (auto *CmpU : U->users()) {
3546       if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
3547         assert(BI->isConditional() && "uses a comparison!");
3548 
3549         BasicBlock *NonNullSuccessor =
3550             BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
3551         BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3552         if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3553           return true;
3554       } else if (Pred == ICmpInst::ICMP_NE &&
3555                  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
3556                  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
3557         return true;
3558       }
3559     }
3560   }
3561 
3562   return false;
3563 }
3564 
3565 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3566                             const DominatorTree *DT) {
3567   if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
3568     return false;
3569 
3570   if (isKnownNonNull(V))
3571     return true;
3572 
3573   if (!CtxI || !DT)
3574     return false;
3575 
3576   return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
3577 }
3578 
3579 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3580                                                    const Value *RHS,
3581                                                    const DataLayout &DL,
3582                                                    AssumptionCache *AC,
3583                                                    const Instruction *CxtI,
3584                                                    const DominatorTree *DT) {
3585   // Multiplying n * m significant bits yields a result of n + m significant
3586   // bits. If the total number of significant bits does not exceed the
3587   // result bit width (minus 1), there is no overflow.
3588   // This means if we have enough leading zero bits in the operands
3589   // we can guarantee that the result does not overflow.
3590   // Ref: "Hacker's Delight" by Henry Warren
3591   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3592   KnownBits LHSKnown(BitWidth);
3593   KnownBits RHSKnown(BitWidth);
3594   computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3595   computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3596   // Note that underestimating the number of zero bits gives a more
3597   // conservative answer.
3598   unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3599                       RHSKnown.countMinLeadingZeros();
3600   // First handle the easy case: if we have enough zero bits there's
3601   // definitely no overflow.
3602   if (ZeroBits >= BitWidth)
3603     return OverflowResult::NeverOverflows;
3604 
3605   // Get the largest possible values for each operand.
3606   APInt LHSMax = ~LHSKnown.Zero;
3607   APInt RHSMax = ~RHSKnown.Zero;
3608 
3609   // We know the multiply operation doesn't overflow if the maximum values for
3610   // each operand will not overflow after we multiply them together.
3611   bool MaxOverflow;
3612   (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3613   if (!MaxOverflow)
3614     return OverflowResult::NeverOverflows;
3615 
3616   // We know it always overflows if multiplying the smallest possible values for
3617   // the operands also results in overflow.
3618   bool MinOverflow;
3619   (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3620   if (MinOverflow)
3621     return OverflowResult::AlwaysOverflows;
3622 
3623   return OverflowResult::MayOverflow;
3624 }
3625 
3626 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3627                                                    const Value *RHS,
3628                                                    const DataLayout &DL,
3629                                                    AssumptionCache *AC,
3630                                                    const Instruction *CxtI,
3631                                                    const DominatorTree *DT) {
3632   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3633   if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3634     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3635 
3636     if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3637       // The sign bit is set in both cases: this MUST overflow.
3638       // Create a simple add instruction, and insert it into the struct.
3639       return OverflowResult::AlwaysOverflows;
3640     }
3641 
3642     if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3643       // The sign bit is clear in both cases: this CANNOT overflow.
3644       // Create a simple add instruction, and insert it into the struct.
3645       return OverflowResult::NeverOverflows;
3646     }
3647   }
3648 
3649   return OverflowResult::MayOverflow;
3650 }
3651 
3652 /// \brief Return true if we can prove that adding the two values of the
3653 /// knownbits will not overflow.
3654 /// Otherwise return false.
3655 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3656                                     const KnownBits &RHSKnown) {
3657   // Addition of two 2's complement numbers having opposite signs will never
3658   // overflow.
3659   if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3660       (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3661     return true;
3662 
3663   // If either of the values is known to be non-negative, adding them can only
3664   // overflow if the second is also non-negative, so we can assume that.
3665   // Two non-negative numbers will only overflow if there is a carry to the
3666   // sign bit, so we can check if even when the values are as big as possible
3667   // there is no overflow to the sign bit.
3668   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3669     APInt MaxLHS = ~LHSKnown.Zero;
3670     MaxLHS.clearSignBit();
3671     APInt MaxRHS = ~RHSKnown.Zero;
3672     MaxRHS.clearSignBit();
3673     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3674     return Result.isSignBitClear();
3675   }
3676 
3677   // If either of the values is known to be negative, adding them can only
3678   // overflow if the second is also negative, so we can assume that.
3679   // Two negative number will only overflow if there is no carry to the sign
3680   // bit, so we can check if even when the values are as small as possible
3681   // there is overflow to the sign bit.
3682   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3683     APInt MinLHS = LHSKnown.One;
3684     MinLHS.clearSignBit();
3685     APInt MinRHS = RHSKnown.One;
3686     MinRHS.clearSignBit();
3687     APInt Result = std::move(MinLHS) + std::move(MinRHS);
3688     return Result.isSignBitSet();
3689   }
3690 
3691   // If we reached here it means that we know nothing about the sign bits.
3692   // In this case we can't know if there will be an overflow, since by
3693   // changing the sign bits any two values can be made to overflow.
3694   return false;
3695 }
3696 
3697 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3698                                                   const Value *RHS,
3699                                                   const AddOperator *Add,
3700                                                   const DataLayout &DL,
3701                                                   AssumptionCache *AC,
3702                                                   const Instruction *CxtI,
3703                                                   const DominatorTree *DT) {
3704   if (Add && Add->hasNoSignedWrap()) {
3705     return OverflowResult::NeverOverflows;
3706   }
3707 
3708   // If LHS and RHS each have at least two sign bits, the addition will look
3709   // like
3710   //
3711   // XX..... +
3712   // YY.....
3713   //
3714   // If the carry into the most significant position is 0, X and Y can't both
3715   // be 1 and therefore the carry out of the addition is also 0.
3716   //
3717   // If the carry into the most significant position is 1, X and Y can't both
3718   // be 0 and therefore the carry out of the addition is also 1.
3719   //
3720   // Since the carry into the most significant position is always equal to
3721   // the carry out of the addition, there is no signed overflow.
3722   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3723       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3724     return OverflowResult::NeverOverflows;
3725 
3726   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3727   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3728 
3729   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3730     return OverflowResult::NeverOverflows;
3731 
3732   // The remaining code needs Add to be available. Early returns if not so.
3733   if (!Add)
3734     return OverflowResult::MayOverflow;
3735 
3736   // If the sign of Add is the same as at least one of the operands, this add
3737   // CANNOT overflow. This is particularly useful when the sum is
3738   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3739   // operands.
3740   bool LHSOrRHSKnownNonNegative =
3741       (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3742   bool LHSOrRHSKnownNegative =
3743       (LHSKnown.isNegative() || RHSKnown.isNegative());
3744   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3745     KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3746     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3747         (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3748       return OverflowResult::NeverOverflows;
3749     }
3750   }
3751 
3752   return OverflowResult::MayOverflow;
3753 }
3754 
3755 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3756                                      const DominatorTree &DT) {
3757 #ifndef NDEBUG
3758   auto IID = II->getIntrinsicID();
3759   assert((IID == Intrinsic::sadd_with_overflow ||
3760           IID == Intrinsic::uadd_with_overflow ||
3761           IID == Intrinsic::ssub_with_overflow ||
3762           IID == Intrinsic::usub_with_overflow ||
3763           IID == Intrinsic::smul_with_overflow ||
3764           IID == Intrinsic::umul_with_overflow) &&
3765          "Not an overflow intrinsic!");
3766 #endif
3767 
3768   SmallVector<const BranchInst *, 2> GuardingBranches;
3769   SmallVector<const ExtractValueInst *, 2> Results;
3770 
3771   for (const User *U : II->users()) {
3772     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3773       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3774 
3775       if (EVI->getIndices()[0] == 0)
3776         Results.push_back(EVI);
3777       else {
3778         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3779 
3780         for (const auto *U : EVI->users())
3781           if (const auto *B = dyn_cast<BranchInst>(U)) {
3782             assert(B->isConditional() && "How else is it using an i1?");
3783             GuardingBranches.push_back(B);
3784           }
3785       }
3786     } else {
3787       // We are using the aggregate directly in a way we don't want to analyze
3788       // here (storing it to a global, say).
3789       return false;
3790     }
3791   }
3792 
3793   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3794     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3795     if (!NoWrapEdge.isSingleEdge())
3796       return false;
3797 
3798     // Check if all users of the add are provably no-wrap.
3799     for (const auto *Result : Results) {
3800       // If the extractvalue itself is not executed on overflow, the we don't
3801       // need to check each use separately, since domination is transitive.
3802       if (DT.dominates(NoWrapEdge, Result->getParent()))
3803         continue;
3804 
3805       for (auto &RU : Result->uses())
3806         if (!DT.dominates(NoWrapEdge, RU))
3807           return false;
3808     }
3809 
3810     return true;
3811   };
3812 
3813   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
3814 }
3815 
3816 
3817 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3818                                                  const DataLayout &DL,
3819                                                  AssumptionCache *AC,
3820                                                  const Instruction *CxtI,
3821                                                  const DominatorTree *DT) {
3822   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3823                                        Add, DL, AC, CxtI, DT);
3824 }
3825 
3826 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3827                                                  const Value *RHS,
3828                                                  const DataLayout &DL,
3829                                                  AssumptionCache *AC,
3830                                                  const Instruction *CxtI,
3831                                                  const DominatorTree *DT) {
3832   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3833 }
3834 
3835 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3836   // A memory operation returns normally if it isn't volatile. A volatile
3837   // operation is allowed to trap.
3838   //
3839   // An atomic operation isn't guaranteed to return in a reasonable amount of
3840   // time because it's possible for another thread to interfere with it for an
3841   // arbitrary length of time, but programs aren't allowed to rely on that.
3842   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3843     return !LI->isVolatile();
3844   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3845     return !SI->isVolatile();
3846   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3847     return !CXI->isVolatile();
3848   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3849     return !RMWI->isVolatile();
3850   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3851     return !MII->isVolatile();
3852 
3853   // If there is no successor, then execution can't transfer to it.
3854   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3855     return !CRI->unwindsToCaller();
3856   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3857     return !CatchSwitch->unwindsToCaller();
3858   if (isa<ResumeInst>(I))
3859     return false;
3860   if (isa<ReturnInst>(I))
3861     return false;
3862   if (isa<UnreachableInst>(I))
3863     return false;
3864 
3865   // Calls can throw, or contain an infinite loop, or kill the process.
3866   if (auto CS = ImmutableCallSite(I)) {
3867     // Call sites that throw have implicit non-local control flow.
3868     if (!CS.doesNotThrow())
3869       return false;
3870 
3871     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3872     // etc. and thus not return.  However, LLVM already assumes that
3873     //
3874     //  - Thread exiting actions are modeled as writes to memory invisible to
3875     //    the program.
3876     //
3877     //  - Loops that don't have side effects (side effects are volatile/atomic
3878     //    stores and IO) always terminate (see http://llvm.org/PR965).
3879     //    Furthermore IO itself is also modeled as writes to memory invisible to
3880     //    the program.
3881     //
3882     // We rely on those assumptions here, and use the memory effects of the call
3883     // target as a proxy for checking that it always returns.
3884 
3885     // FIXME: This isn't aggressive enough; a call which only writes to a global
3886     // is guaranteed to return.
3887     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3888            match(I, m_Intrinsic<Intrinsic::assume>());
3889   }
3890 
3891   // Other instructions return normally.
3892   return true;
3893 }
3894 
3895 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3896                                                   const Loop *L) {
3897   // The loop header is guaranteed to be executed for every iteration.
3898   //
3899   // FIXME: Relax this constraint to cover all basic blocks that are
3900   // guaranteed to be executed at every iteration.
3901   if (I->getParent() != L->getHeader()) return false;
3902 
3903   for (const Instruction &LI : *L->getHeader()) {
3904     if (&LI == I) return true;
3905     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3906   }
3907   llvm_unreachable("Instruction not contained in its own parent basic block.");
3908 }
3909 
3910 bool llvm::propagatesFullPoison(const Instruction *I) {
3911   switch (I->getOpcode()) {
3912   case Instruction::Add:
3913   case Instruction::Sub:
3914   case Instruction::Xor:
3915   case Instruction::Trunc:
3916   case Instruction::BitCast:
3917   case Instruction::AddrSpaceCast:
3918   case Instruction::Mul:
3919   case Instruction::Shl:
3920   case Instruction::GetElementPtr:
3921     // These operations all propagate poison unconditionally. Note that poison
3922     // is not any particular value, so xor or subtraction of poison with
3923     // itself still yields poison, not zero.
3924     return true;
3925 
3926   case Instruction::AShr:
3927   case Instruction::SExt:
3928     // For these operations, one bit of the input is replicated across
3929     // multiple output bits. A replicated poison bit is still poison.
3930     return true;
3931 
3932   case Instruction::ICmp:
3933     // Comparing poison with any value yields poison.  This is why, for
3934     // instance, x s< (x +nsw 1) can be folded to true.
3935     return true;
3936 
3937   default:
3938     return false;
3939   }
3940 }
3941 
3942 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3943   switch (I->getOpcode()) {
3944     case Instruction::Store:
3945       return cast<StoreInst>(I)->getPointerOperand();
3946 
3947     case Instruction::Load:
3948       return cast<LoadInst>(I)->getPointerOperand();
3949 
3950     case Instruction::AtomicCmpXchg:
3951       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3952 
3953     case Instruction::AtomicRMW:
3954       return cast<AtomicRMWInst>(I)->getPointerOperand();
3955 
3956     case Instruction::UDiv:
3957     case Instruction::SDiv:
3958     case Instruction::URem:
3959     case Instruction::SRem:
3960       return I->getOperand(1);
3961 
3962     default:
3963       return nullptr;
3964   }
3965 }
3966 
3967 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
3968   // We currently only look for uses of poison values within the same basic
3969   // block, as that makes it easier to guarantee that the uses will be
3970   // executed given that PoisonI is executed.
3971   //
3972   // FIXME: Expand this to consider uses beyond the same basic block. To do
3973   // this, look out for the distinction between post-dominance and strong
3974   // post-dominance.
3975   const BasicBlock *BB = PoisonI->getParent();
3976 
3977   // Set of instructions that we have proved will yield poison if PoisonI
3978   // does.
3979   SmallSet<const Value *, 16> YieldsPoison;
3980   SmallSet<const BasicBlock *, 4> Visited;
3981   YieldsPoison.insert(PoisonI);
3982   Visited.insert(PoisonI->getParent());
3983 
3984   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
3985 
3986   unsigned Iter = 0;
3987   while (Iter++ < MaxDepth) {
3988     for (auto &I : make_range(Begin, End)) {
3989       if (&I != PoisonI) {
3990         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
3991         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
3992           return true;
3993         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
3994           return false;
3995       }
3996 
3997       // Mark poison that propagates from I through uses of I.
3998       if (YieldsPoison.count(&I)) {
3999         for (const User *User : I.users()) {
4000           const Instruction *UserI = cast<Instruction>(User);
4001           if (propagatesFullPoison(UserI))
4002             YieldsPoison.insert(User);
4003         }
4004       }
4005     }
4006 
4007     if (auto *NextBB = BB->getSingleSuccessor()) {
4008       if (Visited.insert(NextBB).second) {
4009         BB = NextBB;
4010         Begin = BB->getFirstNonPHI()->getIterator();
4011         End = BB->end();
4012         continue;
4013       }
4014     }
4015 
4016     break;
4017   }
4018   return false;
4019 }
4020 
4021 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4022   if (FMF.noNaNs())
4023     return true;
4024 
4025   if (auto *C = dyn_cast<ConstantFP>(V))
4026     return !C->isNaN();
4027   return false;
4028 }
4029 
4030 static bool isKnownNonZero(const Value *V) {
4031   if (auto *C = dyn_cast<ConstantFP>(V))
4032     return !C->isZero();
4033   return false;
4034 }
4035 
4036 /// Match clamp pattern for float types without care about NaNs or signed zeros.
4037 /// Given non-min/max outer cmp/select from the clamp pattern this
4038 /// function recognizes if it can be substitued by a "canonical" min/max
4039 /// pattern.
4040 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4041                                                Value *CmpLHS, Value *CmpRHS,
4042                                                Value *TrueVal, Value *FalseVal,
4043                                                Value *&LHS, Value *&RHS) {
4044   // Try to match
4045   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4046   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4047   // and return description of the outer Max/Min.
4048 
4049   // First, check if select has inverse order:
4050   if (CmpRHS == FalseVal) {
4051     std::swap(TrueVal, FalseVal);
4052     Pred = CmpInst::getInversePredicate(Pred);
4053   }
4054 
4055   // Assume success now. If there's no match, callers should not use these anyway.
4056   LHS = TrueVal;
4057   RHS = FalseVal;
4058 
4059   const APFloat *FC1;
4060   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4061     return {SPF_UNKNOWN, SPNB_NA, false};
4062 
4063   const APFloat *FC2;
4064   switch (Pred) {
4065   case CmpInst::FCMP_OLT:
4066   case CmpInst::FCMP_OLE:
4067   case CmpInst::FCMP_ULT:
4068   case CmpInst::FCMP_ULE:
4069     if (match(FalseVal,
4070               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4071                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4072         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4073       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4074     break;
4075   case CmpInst::FCMP_OGT:
4076   case CmpInst::FCMP_OGE:
4077   case CmpInst::FCMP_UGT:
4078   case CmpInst::FCMP_UGE:
4079     if (match(FalseVal,
4080               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4081                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4082         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4083       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4084     break;
4085   default:
4086     break;
4087   }
4088 
4089   return {SPF_UNKNOWN, SPNB_NA, false};
4090 }
4091 
4092 /// Match non-obvious integer minimum and maximum sequences.
4093 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4094                                        Value *CmpLHS, Value *CmpRHS,
4095                                        Value *TrueVal, Value *FalseVal,
4096                                        Value *&LHS, Value *&RHS) {
4097   // Assume success. If there's no match, callers should not use these anyway.
4098   LHS = TrueVal;
4099   RHS = FalseVal;
4100 
4101   // Recognize variations of:
4102   // CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4103   const APInt *C1;
4104   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4105     const APInt *C2;
4106 
4107     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4108     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4109         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4110       return {SPF_SMAX, SPNB_NA, false};
4111 
4112     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4113     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4114         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4115       return {SPF_SMIN, SPNB_NA, false};
4116 
4117     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4118     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4119         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4120       return {SPF_UMAX, SPNB_NA, false};
4121 
4122     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4123     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4124         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4125       return {SPF_UMIN, SPNB_NA, false};
4126   }
4127 
4128   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4129     return {SPF_UNKNOWN, SPNB_NA, false};
4130 
4131   // Z = X -nsw Y
4132   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4133   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4134   if (match(TrueVal, m_Zero()) &&
4135       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4136     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4137 
4138   // Z = X -nsw Y
4139   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4140   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4141   if (match(FalseVal, m_Zero()) &&
4142       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4143     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4144 
4145   if (!match(CmpRHS, m_APInt(C1)))
4146     return {SPF_UNKNOWN, SPNB_NA, false};
4147 
4148   // An unsigned min/max can be written with a signed compare.
4149   const APInt *C2;
4150   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4151       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4152     // Is the sign bit set?
4153     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4154     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4155     if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue())
4156       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4157 
4158     // Is the sign bit clear?
4159     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4160     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4161     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4162         C2->isMinSignedValue())
4163       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4164   }
4165 
4166   // Look through 'not' ops to find disguised signed min/max.
4167   // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4168   // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4169   if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4170       match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4171     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4172 
4173   // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4174   // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4175   if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4176       match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4177     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4178 
4179   return {SPF_UNKNOWN, SPNB_NA, false};
4180 }
4181 
4182 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4183                                               FastMathFlags FMF,
4184                                               Value *CmpLHS, Value *CmpRHS,
4185                                               Value *TrueVal, Value *FalseVal,
4186                                               Value *&LHS, Value *&RHS) {
4187   LHS = CmpLHS;
4188   RHS = CmpRHS;
4189 
4190   // If the predicate is an "or-equal"  (FP) predicate, then signed zeroes may
4191   // return inconsistent results between implementations.
4192   //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4193   //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4194   // Therefore we behave conservatively and only proceed if at least one of the
4195   // operands is known to not be zero, or if we don't care about signed zeroes.
4196   switch (Pred) {
4197   default: break;
4198   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4199   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4200     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4201         !isKnownNonZero(CmpRHS))
4202       return {SPF_UNKNOWN, SPNB_NA, false};
4203   }
4204 
4205   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4206   bool Ordered = false;
4207 
4208   // When given one NaN and one non-NaN input:
4209   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4210   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4211   //     ordered comparison fails), which could be NaN or non-NaN.
4212   // so here we discover exactly what NaN behavior is required/accepted.
4213   if (CmpInst::isFPPredicate(Pred)) {
4214     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4215     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4216 
4217     if (LHSSafe && RHSSafe) {
4218       // Both operands are known non-NaN.
4219       NaNBehavior = SPNB_RETURNS_ANY;
4220     } else if (CmpInst::isOrdered(Pred)) {
4221       // An ordered comparison will return false when given a NaN, so it
4222       // returns the RHS.
4223       Ordered = true;
4224       if (LHSSafe)
4225         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4226         NaNBehavior = SPNB_RETURNS_NAN;
4227       else if (RHSSafe)
4228         NaNBehavior = SPNB_RETURNS_OTHER;
4229       else
4230         // Completely unsafe.
4231         return {SPF_UNKNOWN, SPNB_NA, false};
4232     } else {
4233       Ordered = false;
4234       // An unordered comparison will return true when given a NaN, so it
4235       // returns the LHS.
4236       if (LHSSafe)
4237         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4238         NaNBehavior = SPNB_RETURNS_OTHER;
4239       else if (RHSSafe)
4240         NaNBehavior = SPNB_RETURNS_NAN;
4241       else
4242         // Completely unsafe.
4243         return {SPF_UNKNOWN, SPNB_NA, false};
4244     }
4245   }
4246 
4247   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4248     std::swap(CmpLHS, CmpRHS);
4249     Pred = CmpInst::getSwappedPredicate(Pred);
4250     if (NaNBehavior == SPNB_RETURNS_NAN)
4251       NaNBehavior = SPNB_RETURNS_OTHER;
4252     else if (NaNBehavior == SPNB_RETURNS_OTHER)
4253       NaNBehavior = SPNB_RETURNS_NAN;
4254     Ordered = !Ordered;
4255   }
4256 
4257   // ([if]cmp X, Y) ? X : Y
4258   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4259     switch (Pred) {
4260     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4261     case ICmpInst::ICMP_UGT:
4262     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4263     case ICmpInst::ICMP_SGT:
4264     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4265     case ICmpInst::ICMP_ULT:
4266     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4267     case ICmpInst::ICMP_SLT:
4268     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4269     case FCmpInst::FCMP_UGT:
4270     case FCmpInst::FCMP_UGE:
4271     case FCmpInst::FCMP_OGT:
4272     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4273     case FCmpInst::FCMP_ULT:
4274     case FCmpInst::FCMP_ULE:
4275     case FCmpInst::FCMP_OLT:
4276     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4277     }
4278   }
4279 
4280   const APInt *C1;
4281   if (match(CmpRHS, m_APInt(C1))) {
4282     if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4283         (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4284 
4285       // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4286       // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4287       if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) {
4288         return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4289       }
4290 
4291       // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4292       // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4293       if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) {
4294         return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4295       }
4296     }
4297   }
4298 
4299   if (CmpInst::isIntPredicate(Pred))
4300     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4301 
4302   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4303   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
4304   // semantics than minNum. Be conservative in such case.
4305   if (NaNBehavior != SPNB_RETURNS_ANY ||
4306       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4307        !isKnownNonZero(CmpRHS)))
4308     return {SPF_UNKNOWN, SPNB_NA, false};
4309 
4310   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4311 }
4312 
4313 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4314                               Instruction::CastOps *CastOp) {
4315   auto *Cast1 = dyn_cast<CastInst>(V1);
4316   if (!Cast1)
4317     return nullptr;
4318 
4319   *CastOp = Cast1->getOpcode();
4320   Type *SrcTy = Cast1->getSrcTy();
4321   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4322     // If V1 and V2 are both the same cast from the same type, look through V1.
4323     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4324       return Cast2->getOperand(0);
4325     return nullptr;
4326   }
4327 
4328   auto *C = dyn_cast<Constant>(V2);
4329   if (!C)
4330     return nullptr;
4331 
4332   Constant *CastedTo = nullptr;
4333   switch (*CastOp) {
4334   case Instruction::ZExt:
4335     if (CmpI->isUnsigned())
4336       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4337     break;
4338   case Instruction::SExt:
4339     if (CmpI->isSigned())
4340       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4341     break;
4342   case Instruction::Trunc:
4343     CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4344     break;
4345   case Instruction::FPTrunc:
4346     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4347     break;
4348   case Instruction::FPExt:
4349     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4350     break;
4351   case Instruction::FPToUI:
4352     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4353     break;
4354   case Instruction::FPToSI:
4355     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4356     break;
4357   case Instruction::UIToFP:
4358     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4359     break;
4360   case Instruction::SIToFP:
4361     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4362     break;
4363   default:
4364     break;
4365   }
4366 
4367   if (!CastedTo)
4368     return nullptr;
4369 
4370   // Make sure the cast doesn't lose any information.
4371   Constant *CastedBack =
4372       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4373   if (CastedBack != C)
4374     return nullptr;
4375 
4376   return CastedTo;
4377 }
4378 
4379 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4380                                              Instruction::CastOps *CastOp) {
4381   SelectInst *SI = dyn_cast<SelectInst>(V);
4382   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4383 
4384   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4385   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4386 
4387   CmpInst::Predicate Pred = CmpI->getPredicate();
4388   Value *CmpLHS = CmpI->getOperand(0);
4389   Value *CmpRHS = CmpI->getOperand(1);
4390   Value *TrueVal = SI->getTrueValue();
4391   Value *FalseVal = SI->getFalseValue();
4392   FastMathFlags FMF;
4393   if (isa<FPMathOperator>(CmpI))
4394     FMF = CmpI->getFastMathFlags();
4395 
4396   // Bail out early.
4397   if (CmpI->isEquality())
4398     return {SPF_UNKNOWN, SPNB_NA, false};
4399 
4400   // Deal with type mismatches.
4401   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4402     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
4403       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4404                                   cast<CastInst>(TrueVal)->getOperand(0), C,
4405                                   LHS, RHS);
4406     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
4407       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4408                                   C, cast<CastInst>(FalseVal)->getOperand(0),
4409                                   LHS, RHS);
4410   }
4411   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4412                               LHS, RHS);
4413 }
4414 
4415 /// Return true if "icmp Pred LHS RHS" is always true.
4416 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
4417                             const Value *RHS, const DataLayout &DL,
4418                             unsigned Depth) {
4419   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4420   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4421     return true;
4422 
4423   switch (Pred) {
4424   default:
4425     return false;
4426 
4427   case CmpInst::ICMP_SLE: {
4428     const APInt *C;
4429 
4430     // LHS s<= LHS +_{nsw} C   if C >= 0
4431     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4432       return !C->isNegative();
4433     return false;
4434   }
4435 
4436   case CmpInst::ICMP_ULE: {
4437     const APInt *C;
4438 
4439     // LHS u<= LHS +_{nuw} C   for any C
4440     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4441       return true;
4442 
4443     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4444     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4445                                        const Value *&X,
4446                                        const APInt *&CA, const APInt *&CB) {
4447       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4448           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4449         return true;
4450 
4451       // If X & C == 0 then (X | C) == X +_{nuw} C
4452       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4453           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4454         KnownBits Known(CA->getBitWidth());
4455         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
4456                          /*CxtI*/ nullptr, /*DT*/ nullptr);
4457         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
4458           return true;
4459       }
4460 
4461       return false;
4462     };
4463 
4464     const Value *X;
4465     const APInt *CLHS, *CRHS;
4466     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4467       return CLHS->ule(*CRHS);
4468 
4469     return false;
4470   }
4471   }
4472 }
4473 
4474 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4475 /// ALHS ARHS" is true.  Otherwise, return None.
4476 static Optional<bool>
4477 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4478                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
4479                       const DataLayout &DL, unsigned Depth) {
4480   switch (Pred) {
4481   default:
4482     return None;
4483 
4484   case CmpInst::ICMP_SLT:
4485   case CmpInst::ICMP_SLE:
4486     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
4487         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
4488       return true;
4489     return None;
4490 
4491   case CmpInst::ICMP_ULT:
4492   case CmpInst::ICMP_ULE:
4493     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
4494         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
4495       return true;
4496     return None;
4497   }
4498 }
4499 
4500 /// Return true if the operands of the two compares match.  IsSwappedOps is true
4501 /// when the operands match, but are swapped.
4502 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4503                           const Value *BLHS, const Value *BRHS,
4504                           bool &IsSwappedOps) {
4505 
4506   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4507   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4508   return IsMatchingOps || IsSwappedOps;
4509 }
4510 
4511 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4512 /// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4513 /// BRHS" is false.  Otherwise, return None if we can't infer anything.
4514 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4515                                                     const Value *ALHS,
4516                                                     const Value *ARHS,
4517                                                     CmpInst::Predicate BPred,
4518                                                     const Value *BLHS,
4519                                                     const Value *BRHS,
4520                                                     bool IsSwappedOps) {
4521   // Canonicalize the operands so they're matching.
4522   if (IsSwappedOps) {
4523     std::swap(BLHS, BRHS);
4524     BPred = ICmpInst::getSwappedPredicate(BPred);
4525   }
4526   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4527     return true;
4528   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4529     return false;
4530 
4531   return None;
4532 }
4533 
4534 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4535 /// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4536 /// C2" is false.  Otherwise, return None if we can't infer anything.
4537 static Optional<bool>
4538 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4539                                  const ConstantInt *C1,
4540                                  CmpInst::Predicate BPred,
4541                                  const Value *BLHS, const ConstantInt *C2) {
4542   assert(ALHS == BLHS && "LHS operands must match.");
4543   ConstantRange DomCR =
4544       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4545   ConstantRange CR =
4546       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
4547   ConstantRange Intersection = DomCR.intersectWith(CR);
4548   ConstantRange Difference = DomCR.difference(CR);
4549   if (Intersection.isEmptySet())
4550     return false;
4551   if (Difference.isEmptySet())
4552     return true;
4553   return None;
4554 }
4555 
4556 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
4557 /// false.  Otherwise, return None if we can't infer anything.
4558 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
4559                                          const ICmpInst *RHS,
4560                                          const DataLayout &DL, bool LHSIsTrue,
4561                                          unsigned Depth) {
4562   Value *ALHS = LHS->getOperand(0);
4563   Value *ARHS = LHS->getOperand(1);
4564   // The rest of the logic assumes the LHS condition is true.  If that's not the
4565   // case, invert the predicate to make it so.
4566   ICmpInst::Predicate APred =
4567       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
4568 
4569   Value *BLHS = RHS->getOperand(0);
4570   Value *BRHS = RHS->getOperand(1);
4571   ICmpInst::Predicate BPred = RHS->getPredicate();
4572 
4573   // Can we infer anything when the two compares have matching operands?
4574   bool IsSwappedOps;
4575   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4576     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4577             APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4578       return Implication;
4579     // No amount of additional analysis will infer the second condition, so
4580     // early exit.
4581     return None;
4582   }
4583 
4584   // Can we infer anything when the LHS operands match and the RHS operands are
4585   // constants (not necessarily matching)?
4586   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4587     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4588             APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4589             cast<ConstantInt>(BRHS)))
4590       return Implication;
4591     // No amount of additional analysis will infer the second condition, so
4592     // early exit.
4593     return None;
4594   }
4595 
4596   if (APred == BPred)
4597     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
4598   return None;
4599 }
4600 
4601 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
4602 /// false.  Otherwise, return None if we can't infer anything.  We expect the
4603 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
4604 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
4605                                          const ICmpInst *RHS,
4606                                          const DataLayout &DL, bool LHSIsTrue,
4607                                          unsigned Depth) {
4608   // The LHS must be an 'or' or an 'and' instruction.
4609   assert((LHS->getOpcode() == Instruction::And ||
4610           LHS->getOpcode() == Instruction::Or) &&
4611          "Expected LHS to be 'and' or 'or'.");
4612 
4613   assert(Depth <= MaxDepth && "Hit recursion limit");
4614 
4615   // If the result of an 'or' is false, then we know both legs of the 'or' are
4616   // false.  Similarly, if the result of an 'and' is true, then we know both
4617   // legs of the 'and' are true.
4618   Value *ALHS, *ARHS;
4619   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
4620       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
4621     // FIXME: Make this non-recursion.
4622     if (Optional<bool> Implication =
4623             isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
4624       return Implication;
4625     if (Optional<bool> Implication =
4626             isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
4627       return Implication;
4628     return None;
4629   }
4630   return None;
4631 }
4632 
4633 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4634                                         const DataLayout &DL, bool LHSIsTrue,
4635                                         unsigned Depth) {
4636   // Bail out when we hit the limit.
4637   if (Depth == MaxDepth)
4638     return None;
4639 
4640   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
4641   // example.
4642   if (LHS->getType() != RHS->getType())
4643     return None;
4644 
4645   Type *OpTy = LHS->getType();
4646   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
4647 
4648   // LHS ==> RHS by definition
4649   if (LHS == RHS)
4650     return LHSIsTrue;
4651 
4652   // FIXME: Extending the code below to handle vectors.
4653   if (OpTy->isVectorTy())
4654     return None;
4655 
4656   assert(OpTy->isIntegerTy(1) && "implied by above");
4657 
4658   // Both LHS and RHS are icmps.
4659   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
4660   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
4661   if (LHSCmp && RHSCmp)
4662     return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
4663 
4664   // The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to be
4665   // an icmp. FIXME: Add support for and/or on the RHS.
4666   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
4667   if (LHSBO && RHSCmp) {
4668     if ((LHSBO->getOpcode() == Instruction::And ||
4669          LHSBO->getOpcode() == Instruction::Or))
4670       return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
4671   }
4672   return None;
4673 }
4674