1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumptionCache.h"
28 #include "llvm/Analysis/GuardUtils.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/Loads.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/Analysis/TargetLibraryInfo.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/LLVMContext.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/Operator.h"
58 #include "llvm/IR/PatternMatch.h"
59 #include "llvm/IR/Type.h"
60 #include "llvm/IR/User.h"
61 #include "llvm/IR/Value.h"
62 #include "llvm/Support/Casting.h"
63 #include "llvm/Support/CommandLine.h"
64 #include "llvm/Support/Compiler.h"
65 #include "llvm/Support/ErrorHandling.h"
66 #include "llvm/Support/KnownBits.h"
67 #include "llvm/Support/MathExtras.h"
68 #include <algorithm>
69 #include <array>
70 #include <cassert>
71 #include <cstdint>
72 #include <iterator>
73 #include <utility>
74 
75 using namespace llvm;
76 using namespace llvm::PatternMatch;
77 
78 const unsigned MaxDepth = 6;
79 
80 // Controls the number of uses of the value searched for possible
81 // dominating comparisons.
82 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83                                               cl::Hidden, cl::init(20));
84 
85 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
86 /// returns the element type's bitwidth.
87 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
88   if (unsigned BitWidth = Ty->getScalarSizeInBits())
89     return BitWidth;
90 
91   return DL.getIndexTypeSizeInBits(Ty);
92 }
93 
94 namespace {
95 
96 // Simplifying using an assume can only be done in a particular control-flow
97 // context (the context instruction provides that context). If an assume and
98 // the context instruction are not in the same block then the DT helps in
99 // figuring out if we can use it.
100 struct Query {
101   const DataLayout &DL;
102   AssumptionCache *AC;
103   const Instruction *CxtI;
104   const DominatorTree *DT;
105 
106   // Unlike the other analyses, this may be a nullptr because not all clients
107   // provide it currently.
108   OptimizationRemarkEmitter *ORE;
109 
110   /// Set of assumptions that should be excluded from further queries.
111   /// This is because of the potential for mutual recursion to cause
112   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
113   /// classic case of this is assume(x = y), which will attempt to determine
114   /// bits in x from bits in y, which will attempt to determine bits in y from
115   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
116   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
117   /// (all of which can call computeKnownBits), and so on.
118   std::array<const Value *, MaxDepth> Excluded;
119 
120   /// If true, it is safe to use metadata during simplification.
121   InstrInfoQuery IIQ;
122 
123   unsigned NumExcluded = 0;
124 
125   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
126         const DominatorTree *DT, bool UseInstrInfo,
127         OptimizationRemarkEmitter *ORE = nullptr)
128       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
129 
130   Query(const Query &Q, const Value *NewExcl)
131       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
132         NumExcluded(Q.NumExcluded) {
133     Excluded = Q.Excluded;
134     Excluded[NumExcluded++] = NewExcl;
135     assert(NumExcluded <= Excluded.size());
136   }
137 
138   bool isExcluded(const Value *Value) const {
139     if (NumExcluded == 0)
140       return false;
141     auto End = Excluded.begin() + NumExcluded;
142     return std::find(Excluded.begin(), End, Value) != End;
143   }
144 };
145 
146 } // end anonymous namespace
147 
148 // Given the provided Value and, potentially, a context instruction, return
149 // the preferred context instruction (if any).
150 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
151   // If we've been provided with a context instruction, then use that (provided
152   // it has been inserted).
153   if (CxtI && CxtI->getParent())
154     return CxtI;
155 
156   // If the value is really an already-inserted instruction, then use that.
157   CxtI = dyn_cast<Instruction>(V);
158   if (CxtI && CxtI->getParent())
159     return CxtI;
160 
161   return nullptr;
162 }
163 
164 static void computeKnownBits(const Value *V, KnownBits &Known,
165                              unsigned Depth, const Query &Q);
166 
167 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
168                             const DataLayout &DL, unsigned Depth,
169                             AssumptionCache *AC, const Instruction *CxtI,
170                             const DominatorTree *DT,
171                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
172   ::computeKnownBits(V, Known, Depth,
173                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
174 }
175 
176 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
177                                   const Query &Q);
178 
179 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
180                                  unsigned Depth, AssumptionCache *AC,
181                                  const Instruction *CxtI,
182                                  const DominatorTree *DT,
183                                  OptimizationRemarkEmitter *ORE,
184                                  bool UseInstrInfo) {
185   return ::computeKnownBits(
186       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
187 }
188 
189 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
190                                const DataLayout &DL, AssumptionCache *AC,
191                                const Instruction *CxtI, const DominatorTree *DT,
192                                bool UseInstrInfo) {
193   assert(LHS->getType() == RHS->getType() &&
194          "LHS and RHS should have the same type");
195   assert(LHS->getType()->isIntOrIntVectorTy() &&
196          "LHS and RHS should be integers");
197   // Look for an inverted mask: (X & ~M) op (Y & M).
198   Value *M;
199   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
200       match(RHS, m_c_And(m_Specific(M), m_Value())))
201     return true;
202   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
203       match(LHS, m_c_And(m_Specific(M), m_Value())))
204     return true;
205   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
206   KnownBits LHSKnown(IT->getBitWidth());
207   KnownBits RHSKnown(IT->getBitWidth());
208   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
209   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
210   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
211 }
212 
213 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
214   for (const User *U : CxtI->users()) {
215     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
216       if (IC->isEquality())
217         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
218           if (C->isNullValue())
219             continue;
220     return false;
221   }
222   return true;
223 }
224 
225 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
226                                    const Query &Q);
227 
228 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
229                                   bool OrZero, unsigned Depth,
230                                   AssumptionCache *AC, const Instruction *CxtI,
231                                   const DominatorTree *DT, bool UseInstrInfo) {
232   return ::isKnownToBeAPowerOfTwo(
233       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
234 }
235 
236 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
237 
238 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
239                           AssumptionCache *AC, const Instruction *CxtI,
240                           const DominatorTree *DT, bool UseInstrInfo) {
241   return ::isKnownNonZero(V, Depth,
242                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
243 }
244 
245 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
246                               unsigned Depth, AssumptionCache *AC,
247                               const Instruction *CxtI, const DominatorTree *DT,
248                               bool UseInstrInfo) {
249   KnownBits Known =
250       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
251   return Known.isNonNegative();
252 }
253 
254 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
255                            AssumptionCache *AC, const Instruction *CxtI,
256                            const DominatorTree *DT, bool UseInstrInfo) {
257   if (auto *CI = dyn_cast<ConstantInt>(V))
258     return CI->getValue().isStrictlyPositive();
259 
260   // TODO: We'd doing two recursive queries here.  We should factor this such
261   // that only a single query is needed.
262   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
263          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
264 }
265 
266 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
267                            AssumptionCache *AC, const Instruction *CxtI,
268                            const DominatorTree *DT, bool UseInstrInfo) {
269   KnownBits Known =
270       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
271   return Known.isNegative();
272 }
273 
274 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
275 
276 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
277                            const DataLayout &DL, AssumptionCache *AC,
278                            const Instruction *CxtI, const DominatorTree *DT,
279                            bool UseInstrInfo) {
280   return ::isKnownNonEqual(V1, V2,
281                            Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
282                                  UseInstrInfo, /*ORE=*/nullptr));
283 }
284 
285 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
286                               const Query &Q);
287 
288 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
289                              const DataLayout &DL, unsigned Depth,
290                              AssumptionCache *AC, const Instruction *CxtI,
291                              const DominatorTree *DT, bool UseInstrInfo) {
292   return ::MaskedValueIsZero(
293       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
294 }
295 
296 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
297                                    const Query &Q);
298 
299 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
300                                   unsigned Depth, AssumptionCache *AC,
301                                   const Instruction *CxtI,
302                                   const DominatorTree *DT, bool UseInstrInfo) {
303   return ::ComputeNumSignBits(
304       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
305 }
306 
307 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
308                                    bool NSW,
309                                    KnownBits &KnownOut, KnownBits &Known2,
310                                    unsigned Depth, const Query &Q) {
311   unsigned BitWidth = KnownOut.getBitWidth();
312 
313   // If an initial sequence of bits in the result is not needed, the
314   // corresponding bits in the operands are not needed.
315   KnownBits LHSKnown(BitWidth);
316   computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
317   computeKnownBits(Op1, Known2, Depth + 1, Q);
318 
319   KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
320 }
321 
322 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
323                                 KnownBits &Known, KnownBits &Known2,
324                                 unsigned Depth, const Query &Q) {
325   unsigned BitWidth = Known.getBitWidth();
326   computeKnownBits(Op1, Known, Depth + 1, Q);
327   computeKnownBits(Op0, Known2, Depth + 1, Q);
328 
329   bool isKnownNegative = false;
330   bool isKnownNonNegative = false;
331   // If the multiplication is known not to overflow, compute the sign bit.
332   if (NSW) {
333     if (Op0 == Op1) {
334       // The product of a number with itself is non-negative.
335       isKnownNonNegative = true;
336     } else {
337       bool isKnownNonNegativeOp1 = Known.isNonNegative();
338       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
339       bool isKnownNegativeOp1 = Known.isNegative();
340       bool isKnownNegativeOp0 = Known2.isNegative();
341       // The product of two numbers with the same sign is non-negative.
342       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
343         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
344       // The product of a negative number and a non-negative number is either
345       // negative or zero.
346       if (!isKnownNonNegative)
347         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
348                            isKnownNonZero(Op0, Depth, Q)) ||
349                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
350                            isKnownNonZero(Op1, Depth, Q));
351     }
352   }
353 
354   assert(!Known.hasConflict() && !Known2.hasConflict());
355   // Compute a conservative estimate for high known-0 bits.
356   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
357                              Known2.countMinLeadingZeros(),
358                              BitWidth) - BitWidth;
359   LeadZ = std::min(LeadZ, BitWidth);
360 
361   // The result of the bottom bits of an integer multiply can be
362   // inferred by looking at the bottom bits of both operands and
363   // multiplying them together.
364   // We can infer at least the minimum number of known trailing bits
365   // of both operands. Depending on number of trailing zeros, we can
366   // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
367   // a and b are divisible by m and n respectively.
368   // We then calculate how many of those bits are inferrable and set
369   // the output. For example, the i8 mul:
370   //  a = XXXX1100 (12)
371   //  b = XXXX1110 (14)
372   // We know the bottom 3 bits are zero since the first can be divided by
373   // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
374   // Applying the multiplication to the trimmed arguments gets:
375   //    XX11 (3)
376   //    X111 (7)
377   // -------
378   //    XX11
379   //   XX11
380   //  XX11
381   // XX11
382   // -------
383   // XXXXX01
384   // Which allows us to infer the 2 LSBs. Since we're multiplying the result
385   // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
386   // The proof for this can be described as:
387   // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
388   //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
389   //                    umin(countTrailingZeros(C2), C6) +
390   //                    umin(C5 - umin(countTrailingZeros(C1), C5),
391   //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
392   // %aa = shl i8 %a, C5
393   // %bb = shl i8 %b, C6
394   // %aaa = or i8 %aa, C1
395   // %bbb = or i8 %bb, C2
396   // %mul = mul i8 %aaa, %bbb
397   // %mask = and i8 %mul, C7
398   //   =>
399   // %mask = i8 ((C1*C2)&C7)
400   // Where C5, C6 describe the known bits of %a, %b
401   // C1, C2 describe the known bottom bits of %a, %b.
402   // C7 describes the mask of the known bits of the result.
403   APInt Bottom0 = Known.One;
404   APInt Bottom1 = Known2.One;
405 
406   // How many times we'd be able to divide each argument by 2 (shr by 1).
407   // This gives us the number of trailing zeros on the multiplication result.
408   unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
409   unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
410   unsigned TrailZero0 = Known.countMinTrailingZeros();
411   unsigned TrailZero1 = Known2.countMinTrailingZeros();
412   unsigned TrailZ = TrailZero0 + TrailZero1;
413 
414   // Figure out the fewest known-bits operand.
415   unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
416                                       TrailBitsKnown1 - TrailZero1);
417   unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
418 
419   APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
420                       Bottom1.getLoBits(TrailBitsKnown1);
421 
422   Known.resetAll();
423   Known.Zero.setHighBits(LeadZ);
424   Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
425   Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
426 
427   // Only make use of no-wrap flags if we failed to compute the sign bit
428   // directly.  This matters if the multiplication always overflows, in
429   // which case we prefer to follow the result of the direct computation,
430   // though as the program is invoking undefined behaviour we can choose
431   // whatever we like here.
432   if (isKnownNonNegative && !Known.isNegative())
433     Known.makeNonNegative();
434   else if (isKnownNegative && !Known.isNonNegative())
435     Known.makeNegative();
436 }
437 
438 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
439                                              KnownBits &Known) {
440   unsigned BitWidth = Known.getBitWidth();
441   unsigned NumRanges = Ranges.getNumOperands() / 2;
442   assert(NumRanges >= 1);
443 
444   Known.Zero.setAllBits();
445   Known.One.setAllBits();
446 
447   for (unsigned i = 0; i < NumRanges; ++i) {
448     ConstantInt *Lower =
449         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
450     ConstantInt *Upper =
451         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
452     ConstantRange Range(Lower->getValue(), Upper->getValue());
453 
454     // The first CommonPrefixBits of all values in Range are equal.
455     unsigned CommonPrefixBits =
456         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
457 
458     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
459     Known.One &= Range.getUnsignedMax() & Mask;
460     Known.Zero &= ~Range.getUnsignedMax() & Mask;
461   }
462 }
463 
464 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
465   SmallVector<const Value *, 16> WorkSet(1, I);
466   SmallPtrSet<const Value *, 32> Visited;
467   SmallPtrSet<const Value *, 16> EphValues;
468 
469   // The instruction defining an assumption's condition itself is always
470   // considered ephemeral to that assumption (even if it has other
471   // non-ephemeral users). See r246696's test case for an example.
472   if (is_contained(I->operands(), E))
473     return true;
474 
475   while (!WorkSet.empty()) {
476     const Value *V = WorkSet.pop_back_val();
477     if (!Visited.insert(V).second)
478       continue;
479 
480     // If all uses of this value are ephemeral, then so is this value.
481     if (llvm::all_of(V->users(), [&](const User *U) {
482                                    return EphValues.count(U);
483                                  })) {
484       if (V == E)
485         return true;
486 
487       if (V == I || isSafeToSpeculativelyExecute(V)) {
488        EphValues.insert(V);
489        if (const User *U = dyn_cast<User>(V))
490          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
491               J != JE; ++J)
492            WorkSet.push_back(*J);
493       }
494     }
495   }
496 
497   return false;
498 }
499 
500 // Is this an intrinsic that cannot be speculated but also cannot trap?
501 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
502   if (const CallInst *CI = dyn_cast<CallInst>(I))
503     if (Function *F = CI->getCalledFunction())
504       switch (F->getIntrinsicID()) {
505       default: break;
506       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
507       case Intrinsic::assume:
508       case Intrinsic::sideeffect:
509       case Intrinsic::dbg_declare:
510       case Intrinsic::dbg_value:
511       case Intrinsic::dbg_label:
512       case Intrinsic::invariant_start:
513       case Intrinsic::invariant_end:
514       case Intrinsic::lifetime_start:
515       case Intrinsic::lifetime_end:
516       case Intrinsic::objectsize:
517       case Intrinsic::ptr_annotation:
518       case Intrinsic::var_annotation:
519         return true;
520       }
521 
522   return false;
523 }
524 
525 bool llvm::isValidAssumeForContext(const Instruction *Inv,
526                                    const Instruction *CxtI,
527                                    const DominatorTree *DT) {
528   // There are two restrictions on the use of an assume:
529   //  1. The assume must dominate the context (or the control flow must
530   //     reach the assume whenever it reaches the context).
531   //  2. The context must not be in the assume's set of ephemeral values
532   //     (otherwise we will use the assume to prove that the condition
533   //     feeding the assume is trivially true, thus causing the removal of
534   //     the assume).
535 
536   if (DT) {
537     if (DT->dominates(Inv, CxtI))
538       return true;
539   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
540     // We don't have a DT, but this trivially dominates.
541     return true;
542   }
543 
544   // With or without a DT, the only remaining case we will check is if the
545   // instructions are in the same BB.  Give up if that is not the case.
546   if (Inv->getParent() != CxtI->getParent())
547     return false;
548 
549   // If we have a dom tree, then we now know that the assume doesn't dominate
550   // the other instruction.  If we don't have a dom tree then we can check if
551   // the assume is first in the BB.
552   if (!DT) {
553     // Search forward from the assume until we reach the context (or the end
554     // of the block); the common case is that the assume will come first.
555     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
556          IE = Inv->getParent()->end(); I != IE; ++I)
557       if (&*I == CxtI)
558         return true;
559   }
560 
561   // The context comes first, but they're both in the same block. Make sure
562   // there is nothing in between that might interrupt the control flow.
563   for (BasicBlock::const_iterator I =
564          std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
565        I != IE; ++I)
566     if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
567       return false;
568 
569   return !isEphemeralValueOf(Inv, CxtI);
570 }
571 
572 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
573                                        unsigned Depth, const Query &Q) {
574   // Use of assumptions is context-sensitive. If we don't have a context, we
575   // cannot use them!
576   if (!Q.AC || !Q.CxtI)
577     return;
578 
579   unsigned BitWidth = Known.getBitWidth();
580 
581   // Note that the patterns below need to be kept in sync with the code
582   // in AssumptionCache::updateAffectedValues.
583 
584   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
585     if (!AssumeVH)
586       continue;
587     CallInst *I = cast<CallInst>(AssumeVH);
588     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
589            "Got assumption for the wrong function!");
590     if (Q.isExcluded(I))
591       continue;
592 
593     // Warning: This loop can end up being somewhat performance sensitive.
594     // We're running this loop for once for each value queried resulting in a
595     // runtime of ~O(#assumes * #values).
596 
597     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
598            "must be an assume intrinsic");
599 
600     Value *Arg = I->getArgOperand(0);
601 
602     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
603       assert(BitWidth == 1 && "assume operand is not i1?");
604       Known.setAllOnes();
605       return;
606     }
607     if (match(Arg, m_Not(m_Specific(V))) &&
608         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
609       assert(BitWidth == 1 && "assume operand is not i1?");
610       Known.setAllZero();
611       return;
612     }
613 
614     // The remaining tests are all recursive, so bail out if we hit the limit.
615     if (Depth == MaxDepth)
616       continue;
617 
618     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
619     if (!Cmp)
620       continue;
621 
622     Value *A, *B;
623     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
624 
625     CmpInst::Predicate Pred;
626     uint64_t C;
627     switch (Cmp->getPredicate()) {
628     default:
629       break;
630     case ICmpInst::ICMP_EQ:
631       // assume(v = a)
632       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
633           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
634         KnownBits RHSKnown(BitWidth);
635         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
636         Known.Zero |= RHSKnown.Zero;
637         Known.One  |= RHSKnown.One;
638       // assume(v & b = a)
639       } else if (match(Cmp,
640                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
641                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
642         KnownBits RHSKnown(BitWidth);
643         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
644         KnownBits MaskKnown(BitWidth);
645         computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
646 
647         // For those bits in the mask that are known to be one, we can propagate
648         // known bits from the RHS to V.
649         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
650         Known.One  |= RHSKnown.One  & MaskKnown.One;
651       // assume(~(v & b) = a)
652       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
653                                      m_Value(A))) &&
654                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
655         KnownBits RHSKnown(BitWidth);
656         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
657         KnownBits MaskKnown(BitWidth);
658         computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
659 
660         // For those bits in the mask that are known to be one, we can propagate
661         // inverted known bits from the RHS to V.
662         Known.Zero |= RHSKnown.One  & MaskKnown.One;
663         Known.One  |= RHSKnown.Zero & MaskKnown.One;
664       // assume(v | b = a)
665       } else if (match(Cmp,
666                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
667                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
668         KnownBits RHSKnown(BitWidth);
669         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
670         KnownBits BKnown(BitWidth);
671         computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
672 
673         // For those bits in B that are known to be zero, we can propagate known
674         // bits from the RHS to V.
675         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
676         Known.One  |= RHSKnown.One  & BKnown.Zero;
677       // assume(~(v | b) = a)
678       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
679                                      m_Value(A))) &&
680                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
681         KnownBits RHSKnown(BitWidth);
682         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
683         KnownBits BKnown(BitWidth);
684         computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
685 
686         // For those bits in B that are known to be zero, we can propagate
687         // inverted known bits from the RHS to V.
688         Known.Zero |= RHSKnown.One  & BKnown.Zero;
689         Known.One  |= RHSKnown.Zero & BKnown.Zero;
690       // assume(v ^ b = a)
691       } else if (match(Cmp,
692                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
693                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
694         KnownBits RHSKnown(BitWidth);
695         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
696         KnownBits BKnown(BitWidth);
697         computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
698 
699         // For those bits in B that are known to be zero, we can propagate known
700         // bits from the RHS to V. For those bits in B that are known to be one,
701         // we can propagate inverted known bits from the RHS to V.
702         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
703         Known.One  |= RHSKnown.One  & BKnown.Zero;
704         Known.Zero |= RHSKnown.One  & BKnown.One;
705         Known.One  |= RHSKnown.Zero & BKnown.One;
706       // assume(~(v ^ b) = a)
707       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
708                                      m_Value(A))) &&
709                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
710         KnownBits RHSKnown(BitWidth);
711         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
712         KnownBits BKnown(BitWidth);
713         computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
714 
715         // For those bits in B that are known to be zero, we can propagate
716         // inverted known bits from the RHS to V. For those bits in B that are
717         // known to be one, we can propagate known bits from the RHS to V.
718         Known.Zero |= RHSKnown.One  & BKnown.Zero;
719         Known.One  |= RHSKnown.Zero & BKnown.Zero;
720         Known.Zero |= RHSKnown.Zero & BKnown.One;
721         Known.One  |= RHSKnown.One  & BKnown.One;
722       // assume(v << c = a)
723       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
724                                      m_Value(A))) &&
725                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
726         KnownBits RHSKnown(BitWidth);
727         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
728         // For those bits in RHS that are known, we can propagate them to known
729         // bits in V shifted to the right by C.
730         RHSKnown.Zero.lshrInPlace(C);
731         Known.Zero |= RHSKnown.Zero;
732         RHSKnown.One.lshrInPlace(C);
733         Known.One  |= RHSKnown.One;
734       // assume(~(v << c) = a)
735       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
736                                      m_Value(A))) &&
737                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
738         KnownBits RHSKnown(BitWidth);
739         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
740         // For those bits in RHS that are known, we can propagate them inverted
741         // to known bits in V shifted to the right by C.
742         RHSKnown.One.lshrInPlace(C);
743         Known.Zero |= RHSKnown.One;
744         RHSKnown.Zero.lshrInPlace(C);
745         Known.One  |= RHSKnown.Zero;
746       // assume(v >> c = a)
747       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
748                                      m_Value(A))) &&
749                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
750         KnownBits RHSKnown(BitWidth);
751         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
752         // For those bits in RHS that are known, we can propagate them to known
753         // bits in V shifted to the right by C.
754         Known.Zero |= RHSKnown.Zero << C;
755         Known.One  |= RHSKnown.One  << C;
756       // assume(~(v >> c) = a)
757       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
758                                      m_Value(A))) &&
759                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
760         KnownBits RHSKnown(BitWidth);
761         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
762         // For those bits in RHS that are known, we can propagate them inverted
763         // to known bits in V shifted to the right by C.
764         Known.Zero |= RHSKnown.One  << C;
765         Known.One  |= RHSKnown.Zero << C;
766       }
767       break;
768     case ICmpInst::ICMP_SGE:
769       // assume(v >=_s c) where c is non-negative
770       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
771           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
772         KnownBits RHSKnown(BitWidth);
773         computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
774 
775         if (RHSKnown.isNonNegative()) {
776           // We know that the sign bit is zero.
777           Known.makeNonNegative();
778         }
779       }
780       break;
781     case ICmpInst::ICMP_SGT:
782       // assume(v >_s c) where c is at least -1.
783       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
784           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
785         KnownBits RHSKnown(BitWidth);
786         computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
787 
788         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
789           // We know that the sign bit is zero.
790           Known.makeNonNegative();
791         }
792       }
793       break;
794     case ICmpInst::ICMP_SLE:
795       // assume(v <=_s c) where c is negative
796       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
797           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
798         KnownBits RHSKnown(BitWidth);
799         computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));
800 
801         if (RHSKnown.isNegative()) {
802           // We know that the sign bit is one.
803           Known.makeNegative();
804         }
805       }
806       break;
807     case ICmpInst::ICMP_SLT:
808       // assume(v <_s c) where c is non-positive
809       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
810           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
811         KnownBits RHSKnown(BitWidth);
812         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
813 
814         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
815           // We know that the sign bit is one.
816           Known.makeNegative();
817         }
818       }
819       break;
820     case ICmpInst::ICMP_ULE:
821       // assume(v <=_u c)
822       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
823           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
824         KnownBits RHSKnown(BitWidth);
825         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
826 
827         // Whatever high bits in c are zero are known to be zero.
828         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
829       }
830       break;
831     case ICmpInst::ICMP_ULT:
832       // assume(v <_u c)
833       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
834           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
835         KnownBits RHSKnown(BitWidth);
836         computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
837 
838         // If the RHS is known zero, then this assumption must be wrong (nothing
839         // is unsigned less than zero). Signal a conflict and get out of here.
840         if (RHSKnown.isZero()) {
841           Known.Zero.setAllBits();
842           Known.One.setAllBits();
843           break;
844         }
845 
846         // Whatever high bits in c are zero are known to be zero (if c is a power
847         // of 2, then one more).
848         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
849           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
850         else
851           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
852       }
853       break;
854     }
855   }
856 
857   // If assumptions conflict with each other or previous known bits, then we
858   // have a logical fallacy. It's possible that the assumption is not reachable,
859   // so this isn't a real bug. On the other hand, the program may have undefined
860   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
861   // clear out the known bits, try to warn the user, and hope for the best.
862   if (Known.Zero.intersects(Known.One)) {
863     Known.resetAll();
864 
865     if (Q.ORE)
866       Q.ORE->emit([&]() {
867         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
868         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
869                                           CxtI)
870                << "Detected conflicting code assumptions. Program may "
871                   "have undefined behavior, or compiler may have "
872                   "internal error.";
873       });
874   }
875 }
876 
877 /// Compute known bits from a shift operator, including those with a
878 /// non-constant shift amount. Known is the output of this function. Known2 is a
879 /// pre-allocated temporary with the same bit width as Known. KZF and KOF are
880 /// operator-specific functions that, given the known-zero or known-one bits
881 /// respectively, and a shift amount, compute the implied known-zero or
882 /// known-one bits of the shift operator's result respectively for that shift
883 /// amount. The results from calling KZF and KOF are conservatively combined for
884 /// all permitted shift amounts.
885 static void computeKnownBitsFromShiftOperator(
886     const Operator *I, KnownBits &Known, KnownBits &Known2,
887     unsigned Depth, const Query &Q,
888     function_ref<APInt(const APInt &, unsigned)> KZF,
889     function_ref<APInt(const APInt &, unsigned)> KOF) {
890   unsigned BitWidth = Known.getBitWidth();
891 
892   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
893     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
894 
895     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
896     Known.Zero = KZF(Known.Zero, ShiftAmt);
897     Known.One  = KOF(Known.One, ShiftAmt);
898     // If the known bits conflict, this must be an overflowing left shift, so
899     // the shift result is poison. We can return anything we want. Choose 0 for
900     // the best folding opportunity.
901     if (Known.hasConflict())
902       Known.setAllZero();
903 
904     return;
905   }
906 
907   computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
908 
909   // If the shift amount could be greater than or equal to the bit-width of the
910   // LHS, the value could be poison, but bail out because the check below is
911   // expensive. TODO: Should we just carry on?
912   if ((~Known.Zero).uge(BitWidth)) {
913     Known.resetAll();
914     return;
915   }
916 
917   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
918   // BitWidth > 64 and any upper bits are known, we'll end up returning the
919   // limit value (which implies all bits are known).
920   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
921   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
922 
923   // It would be more-clearly correct to use the two temporaries for this
924   // calculation. Reusing the APInts here to prevent unnecessary allocations.
925   Known.resetAll();
926 
927   // If we know the shifter operand is nonzero, we can sometimes infer more
928   // known bits. However this is expensive to compute, so be lazy about it and
929   // only compute it when absolutely necessary.
930   Optional<bool> ShifterOperandIsNonZero;
931 
932   // Early exit if we can't constrain any well-defined shift amount.
933   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
934       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
935     ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
936     if (!*ShifterOperandIsNonZero)
937       return;
938   }
939 
940   computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
941 
942   Known.Zero.setAllBits();
943   Known.One.setAllBits();
944   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
945     // Combine the shifted known input bits only for those shift amounts
946     // compatible with its known constraints.
947     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
948       continue;
949     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
950       continue;
951     // If we know the shifter is nonzero, we may be able to infer more known
952     // bits. This check is sunk down as far as possible to avoid the expensive
953     // call to isKnownNonZero if the cheaper checks above fail.
954     if (ShiftAmt == 0) {
955       if (!ShifterOperandIsNonZero.hasValue())
956         ShifterOperandIsNonZero =
957             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
958       if (*ShifterOperandIsNonZero)
959         continue;
960     }
961 
962     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
963     Known.One  &= KOF(Known2.One, ShiftAmt);
964   }
965 
966   // If the known bits conflict, the result is poison. Return a 0 and hope the
967   // caller can further optimize that.
968   if (Known.hasConflict())
969     Known.setAllZero();
970 }
971 
972 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
973                                          unsigned Depth, const Query &Q) {
974   unsigned BitWidth = Known.getBitWidth();
975 
976   KnownBits Known2(Known);
977   switch (I->getOpcode()) {
978   default: break;
979   case Instruction::Load:
980     if (MDNode *MD =
981             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
982       computeKnownBitsFromRangeMetadata(*MD, Known);
983     break;
984   case Instruction::And: {
985     // If either the LHS or the RHS are Zero, the result is zero.
986     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
987     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
988 
989     // Output known-1 bits are only known if set in both the LHS & RHS.
990     Known.One &= Known2.One;
991     // Output known-0 are known to be clear if zero in either the LHS | RHS.
992     Known.Zero |= Known2.Zero;
993 
994     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
995     // here we handle the more general case of adding any odd number by
996     // matching the form add(x, add(x, y)) where y is odd.
997     // TODO: This could be generalized to clearing any bit set in y where the
998     // following bit is known to be unset in y.
999     Value *X = nullptr, *Y = nullptr;
1000     if (!Known.Zero[0] && !Known.One[0] &&
1001         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1002       Known2.resetAll();
1003       computeKnownBits(Y, Known2, Depth + 1, Q);
1004       if (Known2.countMinTrailingOnes() > 0)
1005         Known.Zero.setBit(0);
1006     }
1007     break;
1008   }
1009   case Instruction::Or:
1010     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1011     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1012 
1013     // Output known-0 bits are only known if clear in both the LHS & RHS.
1014     Known.Zero &= Known2.Zero;
1015     // Output known-1 are known to be set if set in either the LHS | RHS.
1016     Known.One |= Known2.One;
1017     break;
1018   case Instruction::Xor: {
1019     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1020     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1021 
1022     // Output known-0 bits are known if clear or set in both the LHS & RHS.
1023     APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1024     // Output known-1 are known to be set if set in only one of the LHS, RHS.
1025     Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1026     Known.Zero = std::move(KnownZeroOut);
1027     break;
1028   }
1029   case Instruction::Mul: {
1030     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1031     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
1032                         Known2, Depth, Q);
1033     break;
1034   }
1035   case Instruction::UDiv: {
1036     // For the purposes of computing leading zeros we can conservatively
1037     // treat a udiv as a logical right shift by the power of 2 known to
1038     // be less than the denominator.
1039     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1040     unsigned LeadZ = Known2.countMinLeadingZeros();
1041 
1042     Known2.resetAll();
1043     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1044     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1045     if (RHSMaxLeadingZeros != BitWidth)
1046       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1047 
1048     Known.Zero.setHighBits(LeadZ);
1049     break;
1050   }
1051   case Instruction::Select: {
1052     const Value *LHS, *RHS;
1053     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1054     if (SelectPatternResult::isMinOrMax(SPF)) {
1055       computeKnownBits(RHS, Known, Depth + 1, Q);
1056       computeKnownBits(LHS, Known2, Depth + 1, Q);
1057     } else {
1058       computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1059       computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1060     }
1061 
1062     unsigned MaxHighOnes = 0;
1063     unsigned MaxHighZeros = 0;
1064     if (SPF == SPF_SMAX) {
1065       // If both sides are negative, the result is negative.
1066       if (Known.isNegative() && Known2.isNegative())
1067         // We can derive a lower bound on the result by taking the max of the
1068         // leading one bits.
1069         MaxHighOnes =
1070             std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1071       // If either side is non-negative, the result is non-negative.
1072       else if (Known.isNonNegative() || Known2.isNonNegative())
1073         MaxHighZeros = 1;
1074     } else if (SPF == SPF_SMIN) {
1075       // If both sides are non-negative, the result is non-negative.
1076       if (Known.isNonNegative() && Known2.isNonNegative())
1077         // We can derive an upper bound on the result by taking the max of the
1078         // leading zero bits.
1079         MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1080                                 Known2.countMinLeadingZeros());
1081       // If either side is negative, the result is negative.
1082       else if (Known.isNegative() || Known2.isNegative())
1083         MaxHighOnes = 1;
1084     } else if (SPF == SPF_UMAX) {
1085       // We can derive a lower bound on the result by taking the max of the
1086       // leading one bits.
1087       MaxHighOnes =
1088           std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1089     } else if (SPF == SPF_UMIN) {
1090       // We can derive an upper bound on the result by taking the max of the
1091       // leading zero bits.
1092       MaxHighZeros =
1093           std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1094     } else if (SPF == SPF_ABS) {
1095       // RHS from matchSelectPattern returns the negation part of abs pattern.
1096       // If the negate has an NSW flag we can assume the sign bit of the result
1097       // will be 0 because that makes abs(INT_MIN) undefined.
1098       if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1099         MaxHighZeros = 1;
1100     }
1101 
1102     // Only known if known in both the LHS and RHS.
1103     Known.One &= Known2.One;
1104     Known.Zero &= Known2.Zero;
1105     if (MaxHighOnes > 0)
1106       Known.One.setHighBits(MaxHighOnes);
1107     if (MaxHighZeros > 0)
1108       Known.Zero.setHighBits(MaxHighZeros);
1109     break;
1110   }
1111   case Instruction::FPTrunc:
1112   case Instruction::FPExt:
1113   case Instruction::FPToUI:
1114   case Instruction::FPToSI:
1115   case Instruction::SIToFP:
1116   case Instruction::UIToFP:
1117     break; // Can't work with floating point.
1118   case Instruction::PtrToInt:
1119   case Instruction::IntToPtr:
1120     // Fall through and handle them the same as zext/trunc.
1121     LLVM_FALLTHROUGH;
1122   case Instruction::ZExt:
1123   case Instruction::Trunc: {
1124     Type *SrcTy = I->getOperand(0)->getType();
1125 
1126     unsigned SrcBitWidth;
1127     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1128     // which fall through here.
1129     Type *ScalarTy = SrcTy->getScalarType();
1130     SrcBitWidth = ScalarTy->isPointerTy() ?
1131       Q.DL.getIndexTypeSizeInBits(ScalarTy) :
1132       Q.DL.getTypeSizeInBits(ScalarTy);
1133 
1134     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1135     Known = Known.zextOrTrunc(SrcBitWidth, false);
1136     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1137     Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
1138     break;
1139   }
1140   case Instruction::BitCast: {
1141     Type *SrcTy = I->getOperand(0)->getType();
1142     if (SrcTy->isIntOrPtrTy() &&
1143         // TODO: For now, not handling conversions like:
1144         // (bitcast i64 %x to <2 x i32>)
1145         !I->getType()->isVectorTy()) {
1146       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1147       break;
1148     }
1149     break;
1150   }
1151   case Instruction::SExt: {
1152     // Compute the bits in the result that are not present in the input.
1153     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1154 
1155     Known = Known.trunc(SrcBitWidth);
1156     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1157     // If the sign bit of the input is known set or clear, then we know the
1158     // top bits of the result.
1159     Known = Known.sext(BitWidth);
1160     break;
1161   }
1162   case Instruction::Shl: {
1163     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1164     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1165     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1166       APInt KZResult = KnownZero << ShiftAmt;
1167       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1168       // If this shift has "nsw" keyword, then the result is either a poison
1169       // value or has the same sign bit as the first operand.
1170       if (NSW && KnownZero.isSignBitSet())
1171         KZResult.setSignBit();
1172       return KZResult;
1173     };
1174 
1175     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1176       APInt KOResult = KnownOne << ShiftAmt;
1177       if (NSW && KnownOne.isSignBitSet())
1178         KOResult.setSignBit();
1179       return KOResult;
1180     };
1181 
1182     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1183     break;
1184   }
1185   case Instruction::LShr: {
1186     // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1187     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1188       APInt KZResult = KnownZero.lshr(ShiftAmt);
1189       // High bits known zero.
1190       KZResult.setHighBits(ShiftAmt);
1191       return KZResult;
1192     };
1193 
1194     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1195       return KnownOne.lshr(ShiftAmt);
1196     };
1197 
1198     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1199     break;
1200   }
1201   case Instruction::AShr: {
1202     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1203     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1204       return KnownZero.ashr(ShiftAmt);
1205     };
1206 
1207     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1208       return KnownOne.ashr(ShiftAmt);
1209     };
1210 
1211     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1212     break;
1213   }
1214   case Instruction::Sub: {
1215     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1216     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1217                            Known, Known2, Depth, Q);
1218     break;
1219   }
1220   case Instruction::Add: {
1221     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1222     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1223                            Known, Known2, Depth, Q);
1224     break;
1225   }
1226   case Instruction::SRem:
1227     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1228       APInt RA = Rem->getValue().abs();
1229       if (RA.isPowerOf2()) {
1230         APInt LowBits = RA - 1;
1231         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1232 
1233         // The low bits of the first operand are unchanged by the srem.
1234         Known.Zero = Known2.Zero & LowBits;
1235         Known.One = Known2.One & LowBits;
1236 
1237         // If the first operand is non-negative or has all low bits zero, then
1238         // the upper bits are all zero.
1239         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1240           Known.Zero |= ~LowBits;
1241 
1242         // If the first operand is negative and not all low bits are zero, then
1243         // the upper bits are all one.
1244         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1245           Known.One |= ~LowBits;
1246 
1247         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1248         break;
1249       }
1250     }
1251 
1252     // The sign bit is the LHS's sign bit, except when the result of the
1253     // remainder is zero.
1254     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1255     // If it's known zero, our sign bit is also zero.
1256     if (Known2.isNonNegative())
1257       Known.makeNonNegative();
1258 
1259     break;
1260   case Instruction::URem: {
1261     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1262       const APInt &RA = Rem->getValue();
1263       if (RA.isPowerOf2()) {
1264         APInt LowBits = (RA - 1);
1265         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1266         Known.Zero |= ~LowBits;
1267         Known.One &= LowBits;
1268         break;
1269       }
1270     }
1271 
1272     // Since the result is less than or equal to either operand, any leading
1273     // zero bits in either operand must also exist in the result.
1274     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1275     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1276 
1277     unsigned Leaders =
1278         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1279     Known.resetAll();
1280     Known.Zero.setHighBits(Leaders);
1281     break;
1282   }
1283 
1284   case Instruction::Alloca: {
1285     const AllocaInst *AI = cast<AllocaInst>(I);
1286     unsigned Align = AI->getAlignment();
1287     if (Align == 0)
1288       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1289 
1290     if (Align > 0)
1291       Known.Zero.setLowBits(countTrailingZeros(Align));
1292     break;
1293   }
1294   case Instruction::GetElementPtr: {
1295     // Analyze all of the subscripts of this getelementptr instruction
1296     // to determine if we can prove known low zero bits.
1297     KnownBits LocalKnown(BitWidth);
1298     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1299     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1300 
1301     gep_type_iterator GTI = gep_type_begin(I);
1302     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1303       Value *Index = I->getOperand(i);
1304       if (StructType *STy = GTI.getStructTypeOrNull()) {
1305         // Handle struct member offset arithmetic.
1306 
1307         // Handle case when index is vector zeroinitializer
1308         Constant *CIndex = cast<Constant>(Index);
1309         if (CIndex->isZeroValue())
1310           continue;
1311 
1312         if (CIndex->getType()->isVectorTy())
1313           Index = CIndex->getSplatValue();
1314 
1315         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1316         const StructLayout *SL = Q.DL.getStructLayout(STy);
1317         uint64_t Offset = SL->getElementOffset(Idx);
1318         TrailZ = std::min<unsigned>(TrailZ,
1319                                     countTrailingZeros(Offset));
1320       } else {
1321         // Handle array index arithmetic.
1322         Type *IndexedTy = GTI.getIndexedType();
1323         if (!IndexedTy->isSized()) {
1324           TrailZ = 0;
1325           break;
1326         }
1327         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1328         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1329         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1330         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1331         TrailZ = std::min(TrailZ,
1332                           unsigned(countTrailingZeros(TypeSize) +
1333                                    LocalKnown.countMinTrailingZeros()));
1334       }
1335     }
1336 
1337     Known.Zero.setLowBits(TrailZ);
1338     break;
1339   }
1340   case Instruction::PHI: {
1341     const PHINode *P = cast<PHINode>(I);
1342     // Handle the case of a simple two-predecessor recurrence PHI.
1343     // There's a lot more that could theoretically be done here, but
1344     // this is sufficient to catch some interesting cases.
1345     if (P->getNumIncomingValues() == 2) {
1346       for (unsigned i = 0; i != 2; ++i) {
1347         Value *L = P->getIncomingValue(i);
1348         Value *R = P->getIncomingValue(!i);
1349         Operator *LU = dyn_cast<Operator>(L);
1350         if (!LU)
1351           continue;
1352         unsigned Opcode = LU->getOpcode();
1353         // Check for operations that have the property that if
1354         // both their operands have low zero bits, the result
1355         // will have low zero bits.
1356         if (Opcode == Instruction::Add ||
1357             Opcode == Instruction::Sub ||
1358             Opcode == Instruction::And ||
1359             Opcode == Instruction::Or ||
1360             Opcode == Instruction::Mul) {
1361           Value *LL = LU->getOperand(0);
1362           Value *LR = LU->getOperand(1);
1363           // Find a recurrence.
1364           if (LL == I)
1365             L = LR;
1366           else if (LR == I)
1367             L = LL;
1368           else
1369             break;
1370           // Ok, we have a PHI of the form L op= R. Check for low
1371           // zero bits.
1372           computeKnownBits(R, Known2, Depth + 1, Q);
1373 
1374           // We need to take the minimum number of known bits
1375           KnownBits Known3(Known);
1376           computeKnownBits(L, Known3, Depth + 1, Q);
1377 
1378           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1379                                          Known3.countMinTrailingZeros()));
1380 
1381           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1382           if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1383             // If initial value of recurrence is nonnegative, and we are adding
1384             // a nonnegative number with nsw, the result can only be nonnegative
1385             // or poison value regardless of the number of times we execute the
1386             // add in phi recurrence. If initial value is negative and we are
1387             // adding a negative number with nsw, the result can only be
1388             // negative or poison value. Similar arguments apply to sub and mul.
1389             //
1390             // (add non-negative, non-negative) --> non-negative
1391             // (add negative, negative) --> negative
1392             if (Opcode == Instruction::Add) {
1393               if (Known2.isNonNegative() && Known3.isNonNegative())
1394                 Known.makeNonNegative();
1395               else if (Known2.isNegative() && Known3.isNegative())
1396                 Known.makeNegative();
1397             }
1398 
1399             // (sub nsw non-negative, negative) --> non-negative
1400             // (sub nsw negative, non-negative) --> negative
1401             else if (Opcode == Instruction::Sub && LL == I) {
1402               if (Known2.isNonNegative() && Known3.isNegative())
1403                 Known.makeNonNegative();
1404               else if (Known2.isNegative() && Known3.isNonNegative())
1405                 Known.makeNegative();
1406             }
1407 
1408             // (mul nsw non-negative, non-negative) --> non-negative
1409             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1410                      Known3.isNonNegative())
1411               Known.makeNonNegative();
1412           }
1413 
1414           break;
1415         }
1416       }
1417     }
1418 
1419     // Unreachable blocks may have zero-operand PHI nodes.
1420     if (P->getNumIncomingValues() == 0)
1421       break;
1422 
1423     // Otherwise take the unions of the known bit sets of the operands,
1424     // taking conservative care to avoid excessive recursion.
1425     if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1426       // Skip if every incoming value references to ourself.
1427       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1428         break;
1429 
1430       Known.Zero.setAllBits();
1431       Known.One.setAllBits();
1432       for (Value *IncValue : P->incoming_values()) {
1433         // Skip direct self references.
1434         if (IncValue == P) continue;
1435 
1436         Known2 = KnownBits(BitWidth);
1437         // Recurse, but cap the recursion to one level, because we don't
1438         // want to waste time spinning around in loops.
1439         computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1440         Known.Zero &= Known2.Zero;
1441         Known.One &= Known2.One;
1442         // If all bits have been ruled out, there's no need to check
1443         // more operands.
1444         if (!Known.Zero && !Known.One)
1445           break;
1446       }
1447     }
1448     break;
1449   }
1450   case Instruction::Call:
1451   case Instruction::Invoke:
1452     // If range metadata is attached to this call, set known bits from that,
1453     // and then intersect with known bits based on other properties of the
1454     // function.
1455     if (MDNode *MD =
1456             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1457       computeKnownBitsFromRangeMetadata(*MD, Known);
1458     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1459       computeKnownBits(RV, Known2, Depth + 1, Q);
1460       Known.Zero |= Known2.Zero;
1461       Known.One |= Known2.One;
1462     }
1463     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1464       switch (II->getIntrinsicID()) {
1465       default: break;
1466       case Intrinsic::bitreverse:
1467         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1468         Known.Zero |= Known2.Zero.reverseBits();
1469         Known.One |= Known2.One.reverseBits();
1470         break;
1471       case Intrinsic::bswap:
1472         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1473         Known.Zero |= Known2.Zero.byteSwap();
1474         Known.One |= Known2.One.byteSwap();
1475         break;
1476       case Intrinsic::ctlz: {
1477         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1478         // If we have a known 1, its position is our upper bound.
1479         unsigned PossibleLZ = Known2.One.countLeadingZeros();
1480         // If this call is undefined for 0, the result will be less than 2^n.
1481         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1482           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1483         unsigned LowBits = Log2_32(PossibleLZ)+1;
1484         Known.Zero.setBitsFrom(LowBits);
1485         break;
1486       }
1487       case Intrinsic::cttz: {
1488         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1489         // If we have a known 1, its position is our upper bound.
1490         unsigned PossibleTZ = Known2.One.countTrailingZeros();
1491         // If this call is undefined for 0, the result will be less than 2^n.
1492         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1493           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1494         unsigned LowBits = Log2_32(PossibleTZ)+1;
1495         Known.Zero.setBitsFrom(LowBits);
1496         break;
1497       }
1498       case Intrinsic::ctpop: {
1499         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1500         // We can bound the space the count needs.  Also, bits known to be zero
1501         // can't contribute to the population.
1502         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1503         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1504         Known.Zero.setBitsFrom(LowBits);
1505         // TODO: we could bound KnownOne using the lower bound on the number
1506         // of bits which might be set provided by popcnt KnownOne2.
1507         break;
1508       }
1509       case Intrinsic::fshr:
1510       case Intrinsic::fshl: {
1511         const APInt *SA;
1512         if (!match(I->getOperand(2), m_APInt(SA)))
1513           break;
1514 
1515         // Normalize to funnel shift left.
1516         uint64_t ShiftAmt = SA->urem(BitWidth);
1517         if (II->getIntrinsicID() == Intrinsic::fshr)
1518           ShiftAmt = BitWidth - ShiftAmt;
1519 
1520         KnownBits Known3(Known);
1521         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1522         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1523 
1524         Known.Zero =
1525             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1526         Known.One =
1527             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1528         break;
1529       }
1530       case Intrinsic::uadd_sat:
1531       case Intrinsic::usub_sat: {
1532         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1533         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1534         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1535 
1536         // Add: Leading ones of either operand are preserved.
1537         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1538         // as leading zeros in the result.
1539         unsigned LeadingKnown;
1540         if (IsAdd)
1541           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1542                                   Known2.countMinLeadingOnes());
1543         else
1544           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1545                                   Known2.countMinLeadingOnes());
1546 
1547         Known = KnownBits::computeForAddSub(
1548             IsAdd, /* NSW */ false, Known, Known2);
1549 
1550         // We select between the operation result and all-ones/zero
1551         // respectively, so we can preserve known ones/zeros.
1552         if (IsAdd) {
1553           Known.One.setHighBits(LeadingKnown);
1554           Known.Zero.clearAllBits();
1555         } else {
1556           Known.Zero.setHighBits(LeadingKnown);
1557           Known.One.clearAllBits();
1558         }
1559         break;
1560       }
1561       case Intrinsic::x86_sse42_crc32_64_64:
1562         Known.Zero.setBitsFrom(32);
1563         break;
1564       }
1565     }
1566     break;
1567   case Instruction::ExtractElement:
1568     // Look through extract element. At the moment we keep this simple and skip
1569     // tracking the specific element. But at least we might find information
1570     // valid for all elements of the vector (for example if vector is sign
1571     // extended, shifted, etc).
1572     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1573     break;
1574   case Instruction::ExtractValue:
1575     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1576       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1577       if (EVI->getNumIndices() != 1) break;
1578       if (EVI->getIndices()[0] == 0) {
1579         switch (II->getIntrinsicID()) {
1580         default: break;
1581         case Intrinsic::uadd_with_overflow:
1582         case Intrinsic::sadd_with_overflow:
1583           computeKnownBitsAddSub(true, II->getArgOperand(0),
1584                                  II->getArgOperand(1), false, Known, Known2,
1585                                  Depth, Q);
1586           break;
1587         case Intrinsic::usub_with_overflow:
1588         case Intrinsic::ssub_with_overflow:
1589           computeKnownBitsAddSub(false, II->getArgOperand(0),
1590                                  II->getArgOperand(1), false, Known, Known2,
1591                                  Depth, Q);
1592           break;
1593         case Intrinsic::umul_with_overflow:
1594         case Intrinsic::smul_with_overflow:
1595           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1596                               Known, Known2, Depth, Q);
1597           break;
1598         }
1599       }
1600     }
1601   }
1602 }
1603 
1604 /// Determine which bits of V are known to be either zero or one and return
1605 /// them.
1606 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1607   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1608   computeKnownBits(V, Known, Depth, Q);
1609   return Known;
1610 }
1611 
1612 /// Determine which bits of V are known to be either zero or one and return
1613 /// them in the Known bit set.
1614 ///
1615 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1616 /// we cannot optimize based on the assumption that it is zero without changing
1617 /// it to be an explicit zero.  If we don't change it to zero, other code could
1618 /// optimized based on the contradictory assumption that it is non-zero.
1619 /// Because instcombine aggressively folds operations with undef args anyway,
1620 /// this won't lose us code quality.
1621 ///
1622 /// This function is defined on values with integer type, values with pointer
1623 /// type, and vectors of integers.  In the case
1624 /// where V is a vector, known zero, and known one values are the
1625 /// same width as the vector element, and the bit is set only if it is true
1626 /// for all of the elements in the vector.
1627 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1628                       const Query &Q) {
1629   assert(V && "No Value?");
1630   assert(Depth <= MaxDepth && "Limit Search Depth");
1631   unsigned BitWidth = Known.getBitWidth();
1632 
1633   assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1634           V->getType()->isPtrOrPtrVectorTy()) &&
1635          "Not integer or pointer type!");
1636 
1637   Type *ScalarTy = V->getType()->getScalarType();
1638   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1639     Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1640   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1641   (void)BitWidth;
1642   (void)ExpectedWidth;
1643 
1644   const APInt *C;
1645   if (match(V, m_APInt(C))) {
1646     // We know all of the bits for a scalar constant or a splat vector constant!
1647     Known.One = *C;
1648     Known.Zero = ~Known.One;
1649     return;
1650   }
1651   // Null and aggregate-zero are all-zeros.
1652   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1653     Known.setAllZero();
1654     return;
1655   }
1656   // Handle a constant vector by taking the intersection of the known bits of
1657   // each element.
1658   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1659     // We know that CDS must be a vector of integers. Take the intersection of
1660     // each element.
1661     Known.Zero.setAllBits(); Known.One.setAllBits();
1662     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1663       APInt Elt = CDS->getElementAsAPInt(i);
1664       Known.Zero &= ~Elt;
1665       Known.One &= Elt;
1666     }
1667     return;
1668   }
1669 
1670   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1671     // We know that CV must be a vector of integers. Take the intersection of
1672     // each element.
1673     Known.Zero.setAllBits(); Known.One.setAllBits();
1674     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1675       Constant *Element = CV->getAggregateElement(i);
1676       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1677       if (!ElementCI) {
1678         Known.resetAll();
1679         return;
1680       }
1681       const APInt &Elt = ElementCI->getValue();
1682       Known.Zero &= ~Elt;
1683       Known.One &= Elt;
1684     }
1685     return;
1686   }
1687 
1688   // Start out not knowing anything.
1689   Known.resetAll();
1690 
1691   // We can't imply anything about undefs.
1692   if (isa<UndefValue>(V))
1693     return;
1694 
1695   // There's no point in looking through other users of ConstantData for
1696   // assumptions.  Confirm that we've handled them all.
1697   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1698 
1699   // Limit search depth.
1700   // All recursive calls that increase depth must come after this.
1701   if (Depth == MaxDepth)
1702     return;
1703 
1704   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1705   // the bits of its aliasee.
1706   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1707     if (!GA->isInterposable())
1708       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1709     return;
1710   }
1711 
1712   if (const Operator *I = dyn_cast<Operator>(V))
1713     computeKnownBitsFromOperator(I, Known, Depth, Q);
1714 
1715   // Aligned pointers have trailing zeros - refine Known.Zero set
1716   if (V->getType()->isPointerTy()) {
1717     unsigned Align = V->getPointerAlignment(Q.DL);
1718     if (Align)
1719       Known.Zero.setLowBits(countTrailingZeros(Align));
1720   }
1721 
1722   // computeKnownBitsFromAssume strictly refines Known.
1723   // Therefore, we run them after computeKnownBitsFromOperator.
1724 
1725   // Check whether a nearby assume intrinsic can determine some known bits.
1726   computeKnownBitsFromAssume(V, Known, Depth, Q);
1727 
1728   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1729 }
1730 
1731 /// Return true if the given value is known to have exactly one
1732 /// bit set when defined. For vectors return true if every element is known to
1733 /// be a power of two when defined. Supports values with integer or pointer
1734 /// types and vectors of integers.
1735 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1736                             const Query &Q) {
1737   assert(Depth <= MaxDepth && "Limit Search Depth");
1738 
1739   // Attempt to match against constants.
1740   if (OrZero && match(V, m_Power2OrZero()))
1741       return true;
1742   if (match(V, m_Power2()))
1743       return true;
1744 
1745   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1746   // it is shifted off the end then the result is undefined.
1747   if (match(V, m_Shl(m_One(), m_Value())))
1748     return true;
1749 
1750   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1751   // the bottom.  If it is shifted off the bottom then the result is undefined.
1752   if (match(V, m_LShr(m_SignMask(), m_Value())))
1753     return true;
1754 
1755   // The remaining tests are all recursive, so bail out if we hit the limit.
1756   if (Depth++ == MaxDepth)
1757     return false;
1758 
1759   Value *X = nullptr, *Y = nullptr;
1760   // A shift left or a logical shift right of a power of two is a power of two
1761   // or zero.
1762   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1763                  match(V, m_LShr(m_Value(X), m_Value()))))
1764     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1765 
1766   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1767     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1768 
1769   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1770     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1771            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1772 
1773   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1774     // A power of two and'd with anything is a power of two or zero.
1775     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1776         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1777       return true;
1778     // X & (-X) is always a power of two or zero.
1779     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1780       return true;
1781     return false;
1782   }
1783 
1784   // Adding a power-of-two or zero to the same power-of-two or zero yields
1785   // either the original power-of-two, a larger power-of-two or zero.
1786   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1787     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1788     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1789         Q.IIQ.hasNoSignedWrap(VOBO)) {
1790       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1791           match(X, m_And(m_Value(), m_Specific(Y))))
1792         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1793           return true;
1794       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1795           match(Y, m_And(m_Value(), m_Specific(X))))
1796         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1797           return true;
1798 
1799       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1800       KnownBits LHSBits(BitWidth);
1801       computeKnownBits(X, LHSBits, Depth, Q);
1802 
1803       KnownBits RHSBits(BitWidth);
1804       computeKnownBits(Y, RHSBits, Depth, Q);
1805       // If i8 V is a power of two or zero:
1806       //  ZeroBits: 1 1 1 0 1 1 1 1
1807       // ~ZeroBits: 0 0 0 1 0 0 0 0
1808       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1809         // If OrZero isn't set, we cannot give back a zero result.
1810         // Make sure either the LHS or RHS has a bit set.
1811         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1812           return true;
1813     }
1814   }
1815 
1816   // An exact divide or right shift can only shift off zero bits, so the result
1817   // is a power of two only if the first operand is a power of two and not
1818   // copying a sign bit (sdiv int_min, 2).
1819   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1820       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1821     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1822                                   Depth, Q);
1823   }
1824 
1825   return false;
1826 }
1827 
1828 /// Test whether a GEP's result is known to be non-null.
1829 ///
1830 /// Uses properties inherent in a GEP to try to determine whether it is known
1831 /// to be non-null.
1832 ///
1833 /// Currently this routine does not support vector GEPs.
1834 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1835                               const Query &Q) {
1836   const Function *F = nullptr;
1837   if (const Instruction *I = dyn_cast<Instruction>(GEP))
1838     F = I->getFunction();
1839 
1840   if (!GEP->isInBounds() ||
1841       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1842     return false;
1843 
1844   // FIXME: Support vector-GEPs.
1845   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1846 
1847   // If the base pointer is non-null, we cannot walk to a null address with an
1848   // inbounds GEP in address space zero.
1849   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1850     return true;
1851 
1852   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1853   // If so, then the GEP cannot produce a null pointer, as doing so would
1854   // inherently violate the inbounds contract within address space zero.
1855   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1856        GTI != GTE; ++GTI) {
1857     // Struct types are easy -- they must always be indexed by a constant.
1858     if (StructType *STy = GTI.getStructTypeOrNull()) {
1859       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1860       unsigned ElementIdx = OpC->getZExtValue();
1861       const StructLayout *SL = Q.DL.getStructLayout(STy);
1862       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1863       if (ElementOffset > 0)
1864         return true;
1865       continue;
1866     }
1867 
1868     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1869     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1870       continue;
1871 
1872     // Fast path the constant operand case both for efficiency and so we don't
1873     // increment Depth when just zipping down an all-constant GEP.
1874     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1875       if (!OpC->isZero())
1876         return true;
1877       continue;
1878     }
1879 
1880     // We post-increment Depth here because while isKnownNonZero increments it
1881     // as well, when we pop back up that increment won't persist. We don't want
1882     // to recurse 10k times just because we have 10k GEP operands. We don't
1883     // bail completely out because we want to handle constant GEPs regardless
1884     // of depth.
1885     if (Depth++ >= MaxDepth)
1886       continue;
1887 
1888     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1889       return true;
1890   }
1891 
1892   return false;
1893 }
1894 
1895 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1896                                                   const Instruction *CtxI,
1897                                                   const DominatorTree *DT) {
1898   assert(V->getType()->isPointerTy() && "V must be pointer type");
1899   assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1900 
1901   if (!CtxI || !DT)
1902     return false;
1903 
1904   unsigned NumUsesExplored = 0;
1905   for (auto *U : V->users()) {
1906     // Avoid massive lists
1907     if (NumUsesExplored >= DomConditionsMaxUses)
1908       break;
1909     NumUsesExplored++;
1910 
1911     // If the value is used as an argument to a call or invoke, then argument
1912     // attributes may provide an answer about null-ness.
1913     if (auto CS = ImmutableCallSite(U))
1914       if (auto *CalledFunc = CS.getCalledFunction())
1915         for (const Argument &Arg : CalledFunc->args())
1916           if (CS.getArgOperand(Arg.getArgNo()) == V &&
1917               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1918             return true;
1919 
1920     // Consider only compare instructions uniquely controlling a branch
1921     CmpInst::Predicate Pred;
1922     if (!match(const_cast<User *>(U),
1923                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1924         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1925       continue;
1926 
1927     SmallVector<const User *, 4> WorkList;
1928     SmallPtrSet<const User *, 4> Visited;
1929     for (auto *CmpU : U->users()) {
1930       assert(WorkList.empty() && "Should be!");
1931       if (Visited.insert(CmpU).second)
1932         WorkList.push_back(CmpU);
1933 
1934       while (!WorkList.empty()) {
1935         auto *Curr = WorkList.pop_back_val();
1936 
1937         // If a user is an AND, add all its users to the work list. We only
1938         // propagate "pred != null" condition through AND because it is only
1939         // correct to assume that all conditions of AND are met in true branch.
1940         // TODO: Support similar logic of OR and EQ predicate?
1941         if (Pred == ICmpInst::ICMP_NE)
1942           if (auto *BO = dyn_cast<BinaryOperator>(Curr))
1943             if (BO->getOpcode() == Instruction::And) {
1944               for (auto *BOU : BO->users())
1945                 if (Visited.insert(BOU).second)
1946                   WorkList.push_back(BOU);
1947               continue;
1948             }
1949 
1950         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
1951           assert(BI->isConditional() && "uses a comparison!");
1952 
1953           BasicBlock *NonNullSuccessor =
1954               BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1955           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1956           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1957             return true;
1958         } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
1959                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
1960           return true;
1961         }
1962       }
1963     }
1964   }
1965 
1966   return false;
1967 }
1968 
1969 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1970 /// ensure that the value it's attached to is never Value?  'RangeType' is
1971 /// is the type of the value described by the range.
1972 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1973   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1974   assert(NumRanges >= 1);
1975   for (unsigned i = 0; i < NumRanges; ++i) {
1976     ConstantInt *Lower =
1977         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1978     ConstantInt *Upper =
1979         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1980     ConstantRange Range(Lower->getValue(), Upper->getValue());
1981     if (Range.contains(Value))
1982       return false;
1983   }
1984   return true;
1985 }
1986 
1987 /// Return true if the given value is known to be non-zero when defined. For
1988 /// vectors, return true if every element is known to be non-zero when
1989 /// defined. For pointers, if the context instruction and dominator tree are
1990 /// specified, perform context-sensitive analysis and return true if the
1991 /// pointer couldn't possibly be null at the specified instruction.
1992 /// Supports values with integer or pointer type and vectors of integers.
1993 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1994   if (auto *C = dyn_cast<Constant>(V)) {
1995     if (C->isNullValue())
1996       return false;
1997     if (isa<ConstantInt>(C))
1998       // Must be non-zero due to null test above.
1999       return true;
2000 
2001     // For constant vectors, check that all elements are undefined or known
2002     // non-zero to determine that the whole vector is known non-zero.
2003     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
2004       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2005         Constant *Elt = C->getAggregateElement(i);
2006         if (!Elt || Elt->isNullValue())
2007           return false;
2008         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2009           return false;
2010       }
2011       return true;
2012     }
2013 
2014     // A global variable in address space 0 is non null unless extern weak
2015     // or an absolute symbol reference. Other address spaces may have null as a
2016     // valid address for a global, so we can't assume anything.
2017     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2018       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2019           GV->getType()->getAddressSpace() == 0)
2020         return true;
2021     } else
2022       return false;
2023   }
2024 
2025   if (auto *I = dyn_cast<Instruction>(V)) {
2026     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2027       // If the possible ranges don't contain zero, then the value is
2028       // definitely non-zero.
2029       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2030         const APInt ZeroValue(Ty->getBitWidth(), 0);
2031         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2032           return true;
2033       }
2034     }
2035   }
2036 
2037   // Some of the tests below are recursive, so bail out if we hit the limit.
2038   if (Depth++ >= MaxDepth)
2039     return false;
2040 
2041   // Check for pointer simplifications.
2042   if (V->getType()->isPointerTy()) {
2043     // Alloca never returns null, malloc might.
2044     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2045       return true;
2046 
2047     // A byval, inalloca, or nonnull argument is never null.
2048     if (const Argument *A = dyn_cast<Argument>(V))
2049       if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
2050         return true;
2051 
2052     // A Load tagged with nonnull metadata is never null.
2053     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2054       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2055         return true;
2056 
2057     if (const auto *Call = dyn_cast<CallBase>(V)) {
2058       if (Call->isReturnNonNull())
2059         return true;
2060       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call))
2061         return isKnownNonZero(RP, Depth, Q);
2062     }
2063   }
2064 
2065 
2066   // Check for recursive pointer simplifications.
2067   if (V->getType()->isPointerTy()) {
2068     if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2069       return true;
2070 
2071     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2072     // do not alter the value, or at least not the nullness property of the
2073     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2074     //
2075     // Note that we have to take special care to avoid looking through
2076     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2077     // as casts that can alter the value, e.g., AddrSpaceCasts.
2078     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2079       if (isGEPKnownNonNull(GEP, Depth, Q))
2080         return true;
2081 
2082     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2083       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2084 
2085     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2086       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2087           Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2088         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2089   }
2090 
2091   // Similar to int2ptr above, we can look through ptr2int here if the cast
2092   // is a no-op or an extend and not a truncate.
2093   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2094     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2095         Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2096       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2097 
2098   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2099 
2100   // X | Y != 0 if X != 0 or Y != 0.
2101   Value *X = nullptr, *Y = nullptr;
2102   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2103     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
2104 
2105   // ext X != 0 if X != 0.
2106   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2107     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2108 
2109   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2110   // if the lowest bit is shifted off the end.
2111   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2112     // shl nuw can't remove any non-zero bits.
2113     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2114     if (Q.IIQ.hasNoUnsignedWrap(BO))
2115       return isKnownNonZero(X, Depth, Q);
2116 
2117     KnownBits Known(BitWidth);
2118     computeKnownBits(X, Known, Depth, Q);
2119     if (Known.One[0])
2120       return true;
2121   }
2122   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2123   // defined if the sign bit is shifted off the end.
2124   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2125     // shr exact can only shift out zero bits.
2126     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2127     if (BO->isExact())
2128       return isKnownNonZero(X, Depth, Q);
2129 
2130     KnownBits Known = computeKnownBits(X, Depth, Q);
2131     if (Known.isNegative())
2132       return true;
2133 
2134     // If the shifter operand is a constant, and all of the bits shifted
2135     // out are known to be zero, and X is known non-zero then at least one
2136     // non-zero bit must remain.
2137     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2138       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2139       // Is there a known one in the portion not shifted out?
2140       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2141         return true;
2142       // Are all the bits to be shifted out known zero?
2143       if (Known.countMinTrailingZeros() >= ShiftVal)
2144         return isKnownNonZero(X, Depth, Q);
2145     }
2146   }
2147   // div exact can only produce a zero if the dividend is zero.
2148   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2149     return isKnownNonZero(X, Depth, Q);
2150   }
2151   // X + Y.
2152   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2153     KnownBits XKnown = computeKnownBits(X, Depth, Q);
2154     KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2155 
2156     // If X and Y are both non-negative (as signed values) then their sum is not
2157     // zero unless both X and Y are zero.
2158     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2159       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2160         return true;
2161 
2162     // If X and Y are both negative (as signed values) then their sum is not
2163     // zero unless both X and Y equal INT_MIN.
2164     if (XKnown.isNegative() && YKnown.isNegative()) {
2165       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2166       // The sign bit of X is set.  If some other bit is set then X is not equal
2167       // to INT_MIN.
2168       if (XKnown.One.intersects(Mask))
2169         return true;
2170       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2171       // to INT_MIN.
2172       if (YKnown.One.intersects(Mask))
2173         return true;
2174     }
2175 
2176     // The sum of a non-negative number and a power of two is not zero.
2177     if (XKnown.isNonNegative() &&
2178         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2179       return true;
2180     if (YKnown.isNonNegative() &&
2181         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2182       return true;
2183   }
2184   // X * Y.
2185   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2186     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2187     // If X and Y are non-zero then so is X * Y as long as the multiplication
2188     // does not overflow.
2189     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2190         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2191       return true;
2192   }
2193   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2194   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2195     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2196         isKnownNonZero(SI->getFalseValue(), Depth, Q))
2197       return true;
2198   }
2199   // PHI
2200   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2201     // Try and detect a recurrence that monotonically increases from a
2202     // starting value, as these are common as induction variables.
2203     if (PN->getNumIncomingValues() == 2) {
2204       Value *Start = PN->getIncomingValue(0);
2205       Value *Induction = PN->getIncomingValue(1);
2206       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2207         std::swap(Start, Induction);
2208       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2209         if (!C->isZero() && !C->isNegative()) {
2210           ConstantInt *X;
2211           if (Q.IIQ.UseInstrInfo &&
2212               (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2213                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2214               !X->isNegative())
2215             return true;
2216         }
2217       }
2218     }
2219     // Check if all incoming values are non-zero constant.
2220     bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2221       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2222     });
2223     if (AllNonZeroConstants)
2224       return true;
2225   }
2226 
2227   KnownBits Known(BitWidth);
2228   computeKnownBits(V, Known, Depth, Q);
2229   return Known.One != 0;
2230 }
2231 
2232 /// Return true if V2 == V1 + X, where X is known non-zero.
2233 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2234   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2235   if (!BO || BO->getOpcode() != Instruction::Add)
2236     return false;
2237   Value *Op = nullptr;
2238   if (V2 == BO->getOperand(0))
2239     Op = BO->getOperand(1);
2240   else if (V2 == BO->getOperand(1))
2241     Op = BO->getOperand(0);
2242   else
2243     return false;
2244   return isKnownNonZero(Op, 0, Q);
2245 }
2246 
2247 /// Return true if it is known that V1 != V2.
2248 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2249   if (V1 == V2)
2250     return false;
2251   if (V1->getType() != V2->getType())
2252     // We can't look through casts yet.
2253     return false;
2254   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2255     return true;
2256 
2257   if (V1->getType()->isIntOrIntVectorTy()) {
2258     // Are any known bits in V1 contradictory to known bits in V2? If V1
2259     // has a known zero where V2 has a known one, they must not be equal.
2260     KnownBits Known1 = computeKnownBits(V1, 0, Q);
2261     KnownBits Known2 = computeKnownBits(V2, 0, Q);
2262 
2263     if (Known1.Zero.intersects(Known2.One) ||
2264         Known2.Zero.intersects(Known1.One))
2265       return true;
2266   }
2267   return false;
2268 }
2269 
2270 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2271 /// simplify operations downstream. Mask is known to be zero for bits that V
2272 /// cannot have.
2273 ///
2274 /// This function is defined on values with integer type, values with pointer
2275 /// type, and vectors of integers.  In the case
2276 /// where V is a vector, the mask, known zero, and known one values are the
2277 /// same width as the vector element, and the bit is set only if it is true
2278 /// for all of the elements in the vector.
2279 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2280                        const Query &Q) {
2281   KnownBits Known(Mask.getBitWidth());
2282   computeKnownBits(V, Known, Depth, Q);
2283   return Mask.isSubsetOf(Known.Zero);
2284 }
2285 
2286 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2287 // Returns the input and lower/upper bounds.
2288 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2289                                 const APInt *&CLow, const APInt *&CHigh) {
2290   assert(isa<Operator>(Select) &&
2291          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2292          "Input should be a Select!");
2293 
2294   const Value *LHS, *RHS, *LHS2, *RHS2;
2295   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2296   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2297     return false;
2298 
2299   if (!match(RHS, m_APInt(CLow)))
2300     return false;
2301 
2302   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2303   if (getInverseMinMaxFlavor(SPF) != SPF2)
2304     return false;
2305 
2306   if (!match(RHS2, m_APInt(CHigh)))
2307     return false;
2308 
2309   if (SPF == SPF_SMIN)
2310     std::swap(CLow, CHigh);
2311 
2312   In = LHS2;
2313   return CLow->sle(*CHigh);
2314 }
2315 
2316 /// For vector constants, loop over the elements and find the constant with the
2317 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2318 /// or if any element was not analyzed; otherwise, return the count for the
2319 /// element with the minimum number of sign bits.
2320 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2321                                                  unsigned TyBits) {
2322   const auto *CV = dyn_cast<Constant>(V);
2323   if (!CV || !CV->getType()->isVectorTy())
2324     return 0;
2325 
2326   unsigned MinSignBits = TyBits;
2327   unsigned NumElts = CV->getType()->getVectorNumElements();
2328   for (unsigned i = 0; i != NumElts; ++i) {
2329     // If we find a non-ConstantInt, bail out.
2330     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2331     if (!Elt)
2332       return 0;
2333 
2334     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2335   }
2336 
2337   return MinSignBits;
2338 }
2339 
2340 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2341                                        const Query &Q);
2342 
2343 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2344                                    const Query &Q) {
2345   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2346   assert(Result > 0 && "At least one sign bit needs to be present!");
2347   return Result;
2348 }
2349 
2350 /// Return the number of times the sign bit of the register is replicated into
2351 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2352 /// (itself), but other cases can give us information. For example, immediately
2353 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2354 /// other, so we return 3. For vectors, return the number of sign bits for the
2355 /// vector element with the minimum number of known sign bits.
2356 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2357                                        const Query &Q) {
2358   assert(Depth <= MaxDepth && "Limit Search Depth");
2359 
2360   // We return the minimum number of sign bits that are guaranteed to be present
2361   // in V, so for undef we have to conservatively return 1.  We don't have the
2362   // same behavior for poison though -- that's a FIXME today.
2363 
2364   Type *ScalarTy = V->getType()->getScalarType();
2365   unsigned TyBits = ScalarTy->isPointerTy() ?
2366     Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2367     Q.DL.getTypeSizeInBits(ScalarTy);
2368 
2369   unsigned Tmp, Tmp2;
2370   unsigned FirstAnswer = 1;
2371 
2372   // Note that ConstantInt is handled by the general computeKnownBits case
2373   // below.
2374 
2375   if (Depth == MaxDepth)
2376     return 1;  // Limit search depth.
2377 
2378   const Operator *U = dyn_cast<Operator>(V);
2379   switch (Operator::getOpcode(V)) {
2380   default: break;
2381   case Instruction::SExt:
2382     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2383     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2384 
2385   case Instruction::SDiv: {
2386     const APInt *Denominator;
2387     // sdiv X, C -> adds log(C) sign bits.
2388     if (match(U->getOperand(1), m_APInt(Denominator))) {
2389 
2390       // Ignore non-positive denominator.
2391       if (!Denominator->isStrictlyPositive())
2392         break;
2393 
2394       // Calculate the incoming numerator bits.
2395       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2396 
2397       // Add floor(log(C)) bits to the numerator bits.
2398       return std::min(TyBits, NumBits + Denominator->logBase2());
2399     }
2400     break;
2401   }
2402 
2403   case Instruction::SRem: {
2404     const APInt *Denominator;
2405     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2406     // positive constant.  This let us put a lower bound on the number of sign
2407     // bits.
2408     if (match(U->getOperand(1), m_APInt(Denominator))) {
2409 
2410       // Ignore non-positive denominator.
2411       if (!Denominator->isStrictlyPositive())
2412         break;
2413 
2414       // Calculate the incoming numerator bits. SRem by a positive constant
2415       // can't lower the number of sign bits.
2416       unsigned NumrBits =
2417           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2418 
2419       // Calculate the leading sign bit constraints by examining the
2420       // denominator.  Given that the denominator is positive, there are two
2421       // cases:
2422       //
2423       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2424       //     (1 << ceilLogBase2(C)).
2425       //
2426       //  2. the numerator is negative.  Then the result range is (-C,0] and
2427       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2428       //
2429       // Thus a lower bound on the number of sign bits is `TyBits -
2430       // ceilLogBase2(C)`.
2431 
2432       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2433       return std::max(NumrBits, ResBits);
2434     }
2435     break;
2436   }
2437 
2438   case Instruction::AShr: {
2439     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2440     // ashr X, C   -> adds C sign bits.  Vectors too.
2441     const APInt *ShAmt;
2442     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2443       if (ShAmt->uge(TyBits))
2444         break;  // Bad shift.
2445       unsigned ShAmtLimited = ShAmt->getZExtValue();
2446       Tmp += ShAmtLimited;
2447       if (Tmp > TyBits) Tmp = TyBits;
2448     }
2449     return Tmp;
2450   }
2451   case Instruction::Shl: {
2452     const APInt *ShAmt;
2453     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2454       // shl destroys sign bits.
2455       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2456       if (ShAmt->uge(TyBits) ||      // Bad shift.
2457           ShAmt->uge(Tmp)) break;    // Shifted all sign bits out.
2458       Tmp2 = ShAmt->getZExtValue();
2459       return Tmp - Tmp2;
2460     }
2461     break;
2462   }
2463   case Instruction::And:
2464   case Instruction::Or:
2465   case Instruction::Xor:    // NOT is handled here.
2466     // Logical binary ops preserve the number of sign bits at the worst.
2467     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2468     if (Tmp != 1) {
2469       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2470       FirstAnswer = std::min(Tmp, Tmp2);
2471       // We computed what we know about the sign bits as our first
2472       // answer. Now proceed to the generic code that uses
2473       // computeKnownBits, and pick whichever answer is better.
2474     }
2475     break;
2476 
2477   case Instruction::Select: {
2478     // If we have a clamp pattern, we know that the number of sign bits will be
2479     // the minimum of the clamp min/max range.
2480     const Value *X;
2481     const APInt *CLow, *CHigh;
2482     if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2483       return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2484 
2485     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2486     if (Tmp == 1) break;
2487     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2488     return std::min(Tmp, Tmp2);
2489   }
2490 
2491   case Instruction::Add:
2492     // Add can have at most one carry bit.  Thus we know that the output
2493     // is, at worst, one more bit than the inputs.
2494     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2495     if (Tmp == 1) break;
2496 
2497     // Special case decrementing a value (ADD X, -1):
2498     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2499       if (CRHS->isAllOnesValue()) {
2500         KnownBits Known(TyBits);
2501         computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2502 
2503         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2504         // sign bits set.
2505         if ((Known.Zero | 1).isAllOnesValue())
2506           return TyBits;
2507 
2508         // If we are subtracting one from a positive number, there is no carry
2509         // out of the result.
2510         if (Known.isNonNegative())
2511           return Tmp;
2512       }
2513 
2514     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2515     if (Tmp2 == 1) break;
2516     return std::min(Tmp, Tmp2)-1;
2517 
2518   case Instruction::Sub:
2519     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2520     if (Tmp2 == 1) break;
2521 
2522     // Handle NEG.
2523     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2524       if (CLHS->isNullValue()) {
2525         KnownBits Known(TyBits);
2526         computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2527         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2528         // sign bits set.
2529         if ((Known.Zero | 1).isAllOnesValue())
2530           return TyBits;
2531 
2532         // If the input is known to be positive (the sign bit is known clear),
2533         // the output of the NEG has the same number of sign bits as the input.
2534         if (Known.isNonNegative())
2535           return Tmp2;
2536 
2537         // Otherwise, we treat this like a SUB.
2538       }
2539 
2540     // Sub can have at most one carry bit.  Thus we know that the output
2541     // is, at worst, one more bit than the inputs.
2542     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2543     if (Tmp == 1) break;
2544     return std::min(Tmp, Tmp2)-1;
2545 
2546   case Instruction::Mul: {
2547     // The output of the Mul can be at most twice the valid bits in the inputs.
2548     unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2549     if (SignBitsOp0 == 1) break;
2550     unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2551     if (SignBitsOp1 == 1) break;
2552     unsigned OutValidBits =
2553         (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2554     return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2555   }
2556 
2557   case Instruction::PHI: {
2558     const PHINode *PN = cast<PHINode>(U);
2559     unsigned NumIncomingValues = PN->getNumIncomingValues();
2560     // Don't analyze large in-degree PHIs.
2561     if (NumIncomingValues > 4) break;
2562     // Unreachable blocks may have zero-operand PHI nodes.
2563     if (NumIncomingValues == 0) break;
2564 
2565     // Take the minimum of all incoming values.  This can't infinitely loop
2566     // because of our depth threshold.
2567     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2568     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2569       if (Tmp == 1) return Tmp;
2570       Tmp = std::min(
2571           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2572     }
2573     return Tmp;
2574   }
2575 
2576   case Instruction::Trunc:
2577     // FIXME: it's tricky to do anything useful for this, but it is an important
2578     // case for targets like X86.
2579     break;
2580 
2581   case Instruction::ExtractElement:
2582     // Look through extract element. At the moment we keep this simple and skip
2583     // tracking the specific element. But at least we might find information
2584     // valid for all elements of the vector (for example if vector is sign
2585     // extended, shifted, etc).
2586     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2587 
2588   case Instruction::ShuffleVector: {
2589     // TODO: This is copied almost directly from the SelectionDAG version of
2590     //       ComputeNumSignBits. It would be better if we could share common
2591     //       code. If not, make sure that changes are translated to the DAG.
2592 
2593     // Collect the minimum number of sign bits that are shared by every vector
2594     // element referenced by the shuffle.
2595     auto *Shuf = cast<ShuffleVectorInst>(U);
2596     int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
2597     int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
2598     APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2599     for (int i = 0; i != NumMaskElts; ++i) {
2600       int M = Shuf->getMaskValue(i);
2601       assert(M < NumElts * 2 && "Invalid shuffle mask constant");
2602       // For undef elements, we don't know anything about the common state of
2603       // the shuffle result.
2604       if (M == -1)
2605         return 1;
2606       if (M < NumElts)
2607         DemandedLHS.setBit(M % NumElts);
2608       else
2609         DemandedRHS.setBit(M % NumElts);
2610     }
2611     Tmp = std::numeric_limits<unsigned>::max();
2612     if (!!DemandedLHS)
2613       Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
2614     if (!!DemandedRHS) {
2615       Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
2616       Tmp = std::min(Tmp, Tmp2);
2617     }
2618     // If we don't know anything, early out and try computeKnownBits fall-back.
2619     if (Tmp == 1)
2620       break;
2621     assert(Tmp <= V->getType()->getScalarSizeInBits() &&
2622            "Failed to determine minimum sign bits");
2623     return Tmp;
2624   }
2625   }
2626 
2627   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2628   // use this information.
2629 
2630   // If we can examine all elements of a vector constant successfully, we're
2631   // done (we can't do any better than that). If not, keep trying.
2632   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2633     return VecSignBits;
2634 
2635   KnownBits Known(TyBits);
2636   computeKnownBits(V, Known, Depth, Q);
2637 
2638   // If we know that the sign bit is either zero or one, determine the number of
2639   // identical bits in the top of the input value.
2640   return std::max(FirstAnswer, Known.countMinSignBits());
2641 }
2642 
2643 /// This function computes the integer multiple of Base that equals V.
2644 /// If successful, it returns true and returns the multiple in
2645 /// Multiple. If unsuccessful, it returns false. It looks
2646 /// through SExt instructions only if LookThroughSExt is true.
2647 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2648                            bool LookThroughSExt, unsigned Depth) {
2649   const unsigned MaxDepth = 6;
2650 
2651   assert(V && "No Value?");
2652   assert(Depth <= MaxDepth && "Limit Search Depth");
2653   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2654 
2655   Type *T = V->getType();
2656 
2657   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2658 
2659   if (Base == 0)
2660     return false;
2661 
2662   if (Base == 1) {
2663     Multiple = V;
2664     return true;
2665   }
2666 
2667   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2668   Constant *BaseVal = ConstantInt::get(T, Base);
2669   if (CO && CO == BaseVal) {
2670     // Multiple is 1.
2671     Multiple = ConstantInt::get(T, 1);
2672     return true;
2673   }
2674 
2675   if (CI && CI->getZExtValue() % Base == 0) {
2676     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2677     return true;
2678   }
2679 
2680   if (Depth == MaxDepth) return false;  // Limit search depth.
2681 
2682   Operator *I = dyn_cast<Operator>(V);
2683   if (!I) return false;
2684 
2685   switch (I->getOpcode()) {
2686   default: break;
2687   case Instruction::SExt:
2688     if (!LookThroughSExt) return false;
2689     // otherwise fall through to ZExt
2690     LLVM_FALLTHROUGH;
2691   case Instruction::ZExt:
2692     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2693                            LookThroughSExt, Depth+1);
2694   case Instruction::Shl:
2695   case Instruction::Mul: {
2696     Value *Op0 = I->getOperand(0);
2697     Value *Op1 = I->getOperand(1);
2698 
2699     if (I->getOpcode() == Instruction::Shl) {
2700       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2701       if (!Op1CI) return false;
2702       // Turn Op0 << Op1 into Op0 * 2^Op1
2703       APInt Op1Int = Op1CI->getValue();
2704       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2705       APInt API(Op1Int.getBitWidth(), 0);
2706       API.setBit(BitToSet);
2707       Op1 = ConstantInt::get(V->getContext(), API);
2708     }
2709 
2710     Value *Mul0 = nullptr;
2711     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2712       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2713         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2714           if (Op1C->getType()->getPrimitiveSizeInBits() <
2715               MulC->getType()->getPrimitiveSizeInBits())
2716             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2717           if (Op1C->getType()->getPrimitiveSizeInBits() >
2718               MulC->getType()->getPrimitiveSizeInBits())
2719             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2720 
2721           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2722           Multiple = ConstantExpr::getMul(MulC, Op1C);
2723           return true;
2724         }
2725 
2726       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2727         if (Mul0CI->getValue() == 1) {
2728           // V == Base * Op1, so return Op1
2729           Multiple = Op1;
2730           return true;
2731         }
2732     }
2733 
2734     Value *Mul1 = nullptr;
2735     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2736       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2737         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2738           if (Op0C->getType()->getPrimitiveSizeInBits() <
2739               MulC->getType()->getPrimitiveSizeInBits())
2740             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2741           if (Op0C->getType()->getPrimitiveSizeInBits() >
2742               MulC->getType()->getPrimitiveSizeInBits())
2743             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2744 
2745           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2746           Multiple = ConstantExpr::getMul(MulC, Op0C);
2747           return true;
2748         }
2749 
2750       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2751         if (Mul1CI->getValue() == 1) {
2752           // V == Base * Op0, so return Op0
2753           Multiple = Op0;
2754           return true;
2755         }
2756     }
2757   }
2758   }
2759 
2760   // We could not determine if V is a multiple of Base.
2761   return false;
2762 }
2763 
2764 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2765                                             const TargetLibraryInfo *TLI) {
2766   const Function *F = ICS.getCalledFunction();
2767   if (!F)
2768     return Intrinsic::not_intrinsic;
2769 
2770   if (F->isIntrinsic())
2771     return F->getIntrinsicID();
2772 
2773   if (!TLI)
2774     return Intrinsic::not_intrinsic;
2775 
2776   LibFunc Func;
2777   // We're going to make assumptions on the semantics of the functions, check
2778   // that the target knows that it's available in this environment and it does
2779   // not have local linkage.
2780   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2781     return Intrinsic::not_intrinsic;
2782 
2783   if (!ICS.onlyReadsMemory())
2784     return Intrinsic::not_intrinsic;
2785 
2786   // Otherwise check if we have a call to a function that can be turned into a
2787   // vector intrinsic.
2788   switch (Func) {
2789   default:
2790     break;
2791   case LibFunc_sin:
2792   case LibFunc_sinf:
2793   case LibFunc_sinl:
2794     return Intrinsic::sin;
2795   case LibFunc_cos:
2796   case LibFunc_cosf:
2797   case LibFunc_cosl:
2798     return Intrinsic::cos;
2799   case LibFunc_exp:
2800   case LibFunc_expf:
2801   case LibFunc_expl:
2802     return Intrinsic::exp;
2803   case LibFunc_exp2:
2804   case LibFunc_exp2f:
2805   case LibFunc_exp2l:
2806     return Intrinsic::exp2;
2807   case LibFunc_log:
2808   case LibFunc_logf:
2809   case LibFunc_logl:
2810     return Intrinsic::log;
2811   case LibFunc_log10:
2812   case LibFunc_log10f:
2813   case LibFunc_log10l:
2814     return Intrinsic::log10;
2815   case LibFunc_log2:
2816   case LibFunc_log2f:
2817   case LibFunc_log2l:
2818     return Intrinsic::log2;
2819   case LibFunc_fabs:
2820   case LibFunc_fabsf:
2821   case LibFunc_fabsl:
2822     return Intrinsic::fabs;
2823   case LibFunc_fmin:
2824   case LibFunc_fminf:
2825   case LibFunc_fminl:
2826     return Intrinsic::minnum;
2827   case LibFunc_fmax:
2828   case LibFunc_fmaxf:
2829   case LibFunc_fmaxl:
2830     return Intrinsic::maxnum;
2831   case LibFunc_copysign:
2832   case LibFunc_copysignf:
2833   case LibFunc_copysignl:
2834     return Intrinsic::copysign;
2835   case LibFunc_floor:
2836   case LibFunc_floorf:
2837   case LibFunc_floorl:
2838     return Intrinsic::floor;
2839   case LibFunc_ceil:
2840   case LibFunc_ceilf:
2841   case LibFunc_ceill:
2842     return Intrinsic::ceil;
2843   case LibFunc_trunc:
2844   case LibFunc_truncf:
2845   case LibFunc_truncl:
2846     return Intrinsic::trunc;
2847   case LibFunc_rint:
2848   case LibFunc_rintf:
2849   case LibFunc_rintl:
2850     return Intrinsic::rint;
2851   case LibFunc_nearbyint:
2852   case LibFunc_nearbyintf:
2853   case LibFunc_nearbyintl:
2854     return Intrinsic::nearbyint;
2855   case LibFunc_round:
2856   case LibFunc_roundf:
2857   case LibFunc_roundl:
2858     return Intrinsic::round;
2859   case LibFunc_pow:
2860   case LibFunc_powf:
2861   case LibFunc_powl:
2862     return Intrinsic::pow;
2863   case LibFunc_sqrt:
2864   case LibFunc_sqrtf:
2865   case LibFunc_sqrtl:
2866     return Intrinsic::sqrt;
2867   }
2868 
2869   return Intrinsic::not_intrinsic;
2870 }
2871 
2872 /// Return true if we can prove that the specified FP value is never equal to
2873 /// -0.0.
2874 ///
2875 /// NOTE: this function will need to be revisited when we support non-default
2876 /// rounding modes!
2877 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2878                                 unsigned Depth) {
2879   if (auto *CFP = dyn_cast<ConstantFP>(V))
2880     return !CFP->getValueAPF().isNegZero();
2881 
2882   // Limit search depth.
2883   if (Depth == MaxDepth)
2884     return false;
2885 
2886   auto *Op = dyn_cast<Operator>(V);
2887   if (!Op)
2888     return false;
2889 
2890   // Check if the nsz fast-math flag is set.
2891   if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2892     if (FPO->hasNoSignedZeros())
2893       return true;
2894 
2895   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2896   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2897     return true;
2898 
2899   // sitofp and uitofp turn into +0.0 for zero.
2900   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2901     return true;
2902 
2903   if (auto *Call = dyn_cast<CallInst>(Op)) {
2904     Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2905     switch (IID) {
2906     default:
2907       break;
2908     // sqrt(-0.0) = -0.0, no other negative results are possible.
2909     case Intrinsic::sqrt:
2910     case Intrinsic::canonicalize:
2911       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2912     // fabs(x) != -0.0
2913     case Intrinsic::fabs:
2914       return true;
2915     }
2916   }
2917 
2918   return false;
2919 }
2920 
2921 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2922 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2923 /// bit despite comparing equal.
2924 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2925                                             const TargetLibraryInfo *TLI,
2926                                             bool SignBitOnly,
2927                                             unsigned Depth) {
2928   // TODO: This function does not do the right thing when SignBitOnly is true
2929   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2930   // which flips the sign bits of NaNs.  See
2931   // https://llvm.org/bugs/show_bug.cgi?id=31702.
2932 
2933   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2934     return !CFP->getValueAPF().isNegative() ||
2935            (!SignBitOnly && CFP->getValueAPF().isZero());
2936   }
2937 
2938   // Handle vector of constants.
2939   if (auto *CV = dyn_cast<Constant>(V)) {
2940     if (CV->getType()->isVectorTy()) {
2941       unsigned NumElts = CV->getType()->getVectorNumElements();
2942       for (unsigned i = 0; i != NumElts; ++i) {
2943         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2944         if (!CFP)
2945           return false;
2946         if (CFP->getValueAPF().isNegative() &&
2947             (SignBitOnly || !CFP->getValueAPF().isZero()))
2948           return false;
2949       }
2950 
2951       // All non-negative ConstantFPs.
2952       return true;
2953     }
2954   }
2955 
2956   if (Depth == MaxDepth)
2957     return false; // Limit search depth.
2958 
2959   const Operator *I = dyn_cast<Operator>(V);
2960   if (!I)
2961     return false;
2962 
2963   switch (I->getOpcode()) {
2964   default:
2965     break;
2966   // Unsigned integers are always nonnegative.
2967   case Instruction::UIToFP:
2968     return true;
2969   case Instruction::FMul:
2970     // x*x is always non-negative or a NaN.
2971     if (I->getOperand(0) == I->getOperand(1) &&
2972         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2973       return true;
2974 
2975     LLVM_FALLTHROUGH;
2976   case Instruction::FAdd:
2977   case Instruction::FDiv:
2978   case Instruction::FRem:
2979     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2980                                            Depth + 1) &&
2981            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2982                                            Depth + 1);
2983   case Instruction::Select:
2984     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2985                                            Depth + 1) &&
2986            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2987                                            Depth + 1);
2988   case Instruction::FPExt:
2989   case Instruction::FPTrunc:
2990     // Widening/narrowing never change sign.
2991     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2992                                            Depth + 1);
2993   case Instruction::ExtractElement:
2994     // Look through extract element. At the moment we keep this simple and skip
2995     // tracking the specific element. But at least we might find information
2996     // valid for all elements of the vector.
2997     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2998                                            Depth + 1);
2999   case Instruction::Call:
3000     const auto *CI = cast<CallInst>(I);
3001     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
3002     switch (IID) {
3003     default:
3004       break;
3005     case Intrinsic::maxnum:
3006       return (isKnownNeverNaN(I->getOperand(0), TLI) &&
3007               cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
3008                                               SignBitOnly, Depth + 1)) ||
3009             (isKnownNeverNaN(I->getOperand(1), TLI) &&
3010               cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3011                                               SignBitOnly, Depth + 1));
3012 
3013     case Intrinsic::maximum:
3014       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3015                                              Depth + 1) ||
3016              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3017                                              Depth + 1);
3018     case Intrinsic::minnum:
3019     case Intrinsic::minimum:
3020       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3021                                              Depth + 1) &&
3022              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3023                                              Depth + 1);
3024     case Intrinsic::exp:
3025     case Intrinsic::exp2:
3026     case Intrinsic::fabs:
3027       return true;
3028 
3029     case Intrinsic::sqrt:
3030       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3031       if (!SignBitOnly)
3032         return true;
3033       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3034                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3035 
3036     case Intrinsic::powi:
3037       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3038         // powi(x,n) is non-negative if n is even.
3039         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3040           return true;
3041       }
3042       // TODO: This is not correct.  Given that exp is an integer, here are the
3043       // ways that pow can return a negative value:
3044       //
3045       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3046       //   pow(-0, exp)   --> -inf if exp is negative odd.
3047       //   pow(-0, exp)   --> -0 if exp is positive odd.
3048       //   pow(-inf, exp) --> -0 if exp is negative odd.
3049       //   pow(-inf, exp) --> -inf if exp is positive odd.
3050       //
3051       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3052       // but we must return false if x == -0.  Unfortunately we do not currently
3053       // have a way of expressing this constraint.  See details in
3054       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3055       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3056                                              Depth + 1);
3057 
3058     case Intrinsic::fma:
3059     case Intrinsic::fmuladd:
3060       // x*x+y is non-negative if y is non-negative.
3061       return I->getOperand(0) == I->getOperand(1) &&
3062              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3063              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3064                                              Depth + 1);
3065     }
3066     break;
3067   }
3068   return false;
3069 }
3070 
3071 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3072                                        const TargetLibraryInfo *TLI) {
3073   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3074 }
3075 
3076 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3077   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3078 }
3079 
3080 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3081                            unsigned Depth) {
3082   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3083 
3084   // If we're told that NaNs won't happen, assume they won't.
3085   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3086     if (FPMathOp->hasNoNaNs())
3087       return true;
3088 
3089   // Handle scalar constants.
3090   if (auto *CFP = dyn_cast<ConstantFP>(V))
3091     return !CFP->isNaN();
3092 
3093   if (Depth == MaxDepth)
3094     return false;
3095 
3096   if (auto *Inst = dyn_cast<Instruction>(V)) {
3097     switch (Inst->getOpcode()) {
3098     case Instruction::FAdd:
3099     case Instruction::FMul:
3100     case Instruction::FSub:
3101     case Instruction::FDiv:
3102     case Instruction::FRem: {
3103       // TODO: Need isKnownNeverInfinity
3104       return false;
3105     }
3106     case Instruction::Select: {
3107       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3108              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3109     }
3110     case Instruction::SIToFP:
3111     case Instruction::UIToFP:
3112       return true;
3113     case Instruction::FPTrunc:
3114     case Instruction::FPExt:
3115       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3116     default:
3117       break;
3118     }
3119   }
3120 
3121   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3122     switch (II->getIntrinsicID()) {
3123     case Intrinsic::canonicalize:
3124     case Intrinsic::fabs:
3125     case Intrinsic::copysign:
3126     case Intrinsic::exp:
3127     case Intrinsic::exp2:
3128     case Intrinsic::floor:
3129     case Intrinsic::ceil:
3130     case Intrinsic::trunc:
3131     case Intrinsic::rint:
3132     case Intrinsic::nearbyint:
3133     case Intrinsic::round:
3134       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3135     case Intrinsic::sqrt:
3136       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3137              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3138     case Intrinsic::minnum:
3139     case Intrinsic::maxnum:
3140       // If either operand is not NaN, the result is not NaN.
3141       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3142              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3143     default:
3144       return false;
3145     }
3146   }
3147 
3148   // Bail out for constant expressions, but try to handle vector constants.
3149   if (!V->getType()->isVectorTy() || !isa<Constant>(V))
3150     return false;
3151 
3152   // For vectors, verify that each element is not NaN.
3153   unsigned NumElts = V->getType()->getVectorNumElements();
3154   for (unsigned i = 0; i != NumElts; ++i) {
3155     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3156     if (!Elt)
3157       return false;
3158     if (isa<UndefValue>(Elt))
3159       continue;
3160     auto *CElt = dyn_cast<ConstantFP>(Elt);
3161     if (!CElt || CElt->isNaN())
3162       return false;
3163   }
3164   // All elements were confirmed not-NaN or undefined.
3165   return true;
3166 }
3167 
3168 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3169 
3170   // All byte-wide stores are splatable, even of arbitrary variables.
3171   if (V->getType()->isIntegerTy(8))
3172     return V;
3173 
3174   LLVMContext &Ctx = V->getContext();
3175 
3176   // Undef don't care.
3177   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3178   if (isa<UndefValue>(V))
3179     return UndefInt8;
3180 
3181   const uint64_t Size = DL.getTypeStoreSize(V->getType());
3182   if (!Size)
3183     return UndefInt8;
3184 
3185   Constant *C = dyn_cast<Constant>(V);
3186   if (!C) {
3187     // Conceptually, we could handle things like:
3188     //   %a = zext i8 %X to i16
3189     //   %b = shl i16 %a, 8
3190     //   %c = or i16 %a, %b
3191     // but until there is an example that actually needs this, it doesn't seem
3192     // worth worrying about.
3193     return nullptr;
3194   }
3195 
3196   // Handle 'null' ConstantArrayZero etc.
3197   if (C->isNullValue())
3198     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3199 
3200   // Constant floating-point values can be handled as integer values if the
3201   // corresponding integer value is "byteable".  An important case is 0.0.
3202   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3203     Type *Ty = nullptr;
3204     if (CFP->getType()->isHalfTy())
3205       Ty = Type::getInt16Ty(Ctx);
3206     else if (CFP->getType()->isFloatTy())
3207       Ty = Type::getInt32Ty(Ctx);
3208     else if (CFP->getType()->isDoubleTy())
3209       Ty = Type::getInt64Ty(Ctx);
3210     // Don't handle long double formats, which have strange constraints.
3211     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3212               : nullptr;
3213   }
3214 
3215   // We can handle constant integers that are multiple of 8 bits.
3216   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3217     if (CI->getBitWidth() % 8 == 0) {
3218       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3219       if (!CI->getValue().isSplat(8))
3220         return nullptr;
3221       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3222     }
3223   }
3224 
3225   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3226     if (CE->getOpcode() == Instruction::IntToPtr) {
3227       auto PS = DL.getPointerSizeInBits(
3228           cast<PointerType>(CE->getType())->getAddressSpace());
3229       return isBytewiseValue(
3230           ConstantExpr::getIntegerCast(CE->getOperand(0),
3231                                        Type::getIntNTy(Ctx, PS), false),
3232           DL);
3233     }
3234   }
3235 
3236   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3237     if (LHS == RHS)
3238       return LHS;
3239     if (!LHS || !RHS)
3240       return nullptr;
3241     if (LHS == UndefInt8)
3242       return RHS;
3243     if (RHS == UndefInt8)
3244       return LHS;
3245     return nullptr;
3246   };
3247 
3248   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3249     Value *Val = UndefInt8;
3250     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3251       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3252         return nullptr;
3253     return Val;
3254   }
3255 
3256   if (isa<ConstantVector>(C)) {
3257     Constant *Splat = cast<ConstantVector>(C)->getSplatValue();
3258     return Splat ? isBytewiseValue(Splat, DL) : nullptr;
3259   }
3260 
3261   if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
3262     Value *Val = UndefInt8;
3263     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3264       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3265         return nullptr;
3266     return Val;
3267   }
3268 
3269   // Don't try to handle the handful of other constants.
3270   return nullptr;
3271 }
3272 
3273 // This is the recursive version of BuildSubAggregate. It takes a few different
3274 // arguments. Idxs is the index within the nested struct From that we are
3275 // looking at now (which is of type IndexedType). IdxSkip is the number of
3276 // indices from Idxs that should be left out when inserting into the resulting
3277 // struct. To is the result struct built so far, new insertvalue instructions
3278 // build on that.
3279 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3280                                 SmallVectorImpl<unsigned> &Idxs,
3281                                 unsigned IdxSkip,
3282                                 Instruction *InsertBefore) {
3283   StructType *STy = dyn_cast<StructType>(IndexedType);
3284   if (STy) {
3285     // Save the original To argument so we can modify it
3286     Value *OrigTo = To;
3287     // General case, the type indexed by Idxs is a struct
3288     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3289       // Process each struct element recursively
3290       Idxs.push_back(i);
3291       Value *PrevTo = To;
3292       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3293                              InsertBefore);
3294       Idxs.pop_back();
3295       if (!To) {
3296         // Couldn't find any inserted value for this index? Cleanup
3297         while (PrevTo != OrigTo) {
3298           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3299           PrevTo = Del->getAggregateOperand();
3300           Del->eraseFromParent();
3301         }
3302         // Stop processing elements
3303         break;
3304       }
3305     }
3306     // If we successfully found a value for each of our subaggregates
3307     if (To)
3308       return To;
3309   }
3310   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3311   // the struct's elements had a value that was inserted directly. In the latter
3312   // case, perhaps we can't determine each of the subelements individually, but
3313   // we might be able to find the complete struct somewhere.
3314 
3315   // Find the value that is at that particular spot
3316   Value *V = FindInsertedValue(From, Idxs);
3317 
3318   if (!V)
3319     return nullptr;
3320 
3321   // Insert the value in the new (sub) aggregate
3322   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3323                                  "tmp", InsertBefore);
3324 }
3325 
3326 // This helper takes a nested struct and extracts a part of it (which is again a
3327 // struct) into a new value. For example, given the struct:
3328 // { a, { b, { c, d }, e } }
3329 // and the indices "1, 1" this returns
3330 // { c, d }.
3331 //
3332 // It does this by inserting an insertvalue for each element in the resulting
3333 // struct, as opposed to just inserting a single struct. This will only work if
3334 // each of the elements of the substruct are known (ie, inserted into From by an
3335 // insertvalue instruction somewhere).
3336 //
3337 // All inserted insertvalue instructions are inserted before InsertBefore
3338 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3339                                 Instruction *InsertBefore) {
3340   assert(InsertBefore && "Must have someplace to insert!");
3341   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3342                                                              idx_range);
3343   Value *To = UndefValue::get(IndexedType);
3344   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3345   unsigned IdxSkip = Idxs.size();
3346 
3347   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3348 }
3349 
3350 /// Given an aggregate and a sequence of indices, see if the scalar value
3351 /// indexed is already around as a register, for example if it was inserted
3352 /// directly into the aggregate.
3353 ///
3354 /// If InsertBefore is not null, this function will duplicate (modified)
3355 /// insertvalues when a part of a nested struct is extracted.
3356 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3357                                Instruction *InsertBefore) {
3358   // Nothing to index? Just return V then (this is useful at the end of our
3359   // recursion).
3360   if (idx_range.empty())
3361     return V;
3362   // We have indices, so V should have an indexable type.
3363   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3364          "Not looking at a struct or array?");
3365   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3366          "Invalid indices for type?");
3367 
3368   if (Constant *C = dyn_cast<Constant>(V)) {
3369     C = C->getAggregateElement(idx_range[0]);
3370     if (!C) return nullptr;
3371     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3372   }
3373 
3374   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3375     // Loop the indices for the insertvalue instruction in parallel with the
3376     // requested indices
3377     const unsigned *req_idx = idx_range.begin();
3378     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3379          i != e; ++i, ++req_idx) {
3380       if (req_idx == idx_range.end()) {
3381         // We can't handle this without inserting insertvalues
3382         if (!InsertBefore)
3383           return nullptr;
3384 
3385         // The requested index identifies a part of a nested aggregate. Handle
3386         // this specially. For example,
3387         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3388         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3389         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3390         // This can be changed into
3391         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3392         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3393         // which allows the unused 0,0 element from the nested struct to be
3394         // removed.
3395         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3396                                  InsertBefore);
3397       }
3398 
3399       // This insert value inserts something else than what we are looking for.
3400       // See if the (aggregate) value inserted into has the value we are
3401       // looking for, then.
3402       if (*req_idx != *i)
3403         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3404                                  InsertBefore);
3405     }
3406     // If we end up here, the indices of the insertvalue match with those
3407     // requested (though possibly only partially). Now we recursively look at
3408     // the inserted value, passing any remaining indices.
3409     return FindInsertedValue(I->getInsertedValueOperand(),
3410                              makeArrayRef(req_idx, idx_range.end()),
3411                              InsertBefore);
3412   }
3413 
3414   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3415     // If we're extracting a value from an aggregate that was extracted from
3416     // something else, we can extract from that something else directly instead.
3417     // However, we will need to chain I's indices with the requested indices.
3418 
3419     // Calculate the number of indices required
3420     unsigned size = I->getNumIndices() + idx_range.size();
3421     // Allocate some space to put the new indices in
3422     SmallVector<unsigned, 5> Idxs;
3423     Idxs.reserve(size);
3424     // Add indices from the extract value instruction
3425     Idxs.append(I->idx_begin(), I->idx_end());
3426 
3427     // Add requested indices
3428     Idxs.append(idx_range.begin(), idx_range.end());
3429 
3430     assert(Idxs.size() == size
3431            && "Number of indices added not correct?");
3432 
3433     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3434   }
3435   // Otherwise, we don't know (such as, extracting from a function return value
3436   // or load instruction)
3437   return nullptr;
3438 }
3439 
3440 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3441                                        unsigned CharSize) {
3442   // Make sure the GEP has exactly three arguments.
3443   if (GEP->getNumOperands() != 3)
3444     return false;
3445 
3446   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3447   // CharSize.
3448   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3449   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3450     return false;
3451 
3452   // Check to make sure that the first operand of the GEP is an integer and
3453   // has value 0 so that we are sure we're indexing into the initializer.
3454   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3455   if (!FirstIdx || !FirstIdx->isZero())
3456     return false;
3457 
3458   return true;
3459 }
3460 
3461 bool llvm::getConstantDataArrayInfo(const Value *V,
3462                                     ConstantDataArraySlice &Slice,
3463                                     unsigned ElementSize, uint64_t Offset) {
3464   assert(V);
3465 
3466   // Look through bitcast instructions and geps.
3467   V = V->stripPointerCasts();
3468 
3469   // If the value is a GEP instruction or constant expression, treat it as an
3470   // offset.
3471   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3472     // The GEP operator should be based on a pointer to string constant, and is
3473     // indexing into the string constant.
3474     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3475       return false;
3476 
3477     // If the second index isn't a ConstantInt, then this is a variable index
3478     // into the array.  If this occurs, we can't say anything meaningful about
3479     // the string.
3480     uint64_t StartIdx = 0;
3481     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3482       StartIdx = CI->getZExtValue();
3483     else
3484       return false;
3485     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3486                                     StartIdx + Offset);
3487   }
3488 
3489   // The GEP instruction, constant or instruction, must reference a global
3490   // variable that is a constant and is initialized. The referenced constant
3491   // initializer is the array that we'll use for optimization.
3492   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3493   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3494     return false;
3495 
3496   const ConstantDataArray *Array;
3497   ArrayType *ArrayTy;
3498   if (GV->getInitializer()->isNullValue()) {
3499     Type *GVTy = GV->getValueType();
3500     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3501       // A zeroinitializer for the array; there is no ConstantDataArray.
3502       Array = nullptr;
3503     } else {
3504       const DataLayout &DL = GV->getParent()->getDataLayout();
3505       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3506       uint64_t Length = SizeInBytes / (ElementSize / 8);
3507       if (Length <= Offset)
3508         return false;
3509 
3510       Slice.Array = nullptr;
3511       Slice.Offset = 0;
3512       Slice.Length = Length - Offset;
3513       return true;
3514     }
3515   } else {
3516     // This must be a ConstantDataArray.
3517     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3518     if (!Array)
3519       return false;
3520     ArrayTy = Array->getType();
3521   }
3522   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3523     return false;
3524 
3525   uint64_t NumElts = ArrayTy->getArrayNumElements();
3526   if (Offset > NumElts)
3527     return false;
3528 
3529   Slice.Array = Array;
3530   Slice.Offset = Offset;
3531   Slice.Length = NumElts - Offset;
3532   return true;
3533 }
3534 
3535 /// This function computes the length of a null-terminated C string pointed to
3536 /// by V. If successful, it returns true and returns the string in Str.
3537 /// If unsuccessful, it returns false.
3538 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3539                                  uint64_t Offset, bool TrimAtNul) {
3540   ConstantDataArraySlice Slice;
3541   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3542     return false;
3543 
3544   if (Slice.Array == nullptr) {
3545     if (TrimAtNul) {
3546       Str = StringRef();
3547       return true;
3548     }
3549     if (Slice.Length == 1) {
3550       Str = StringRef("", 1);
3551       return true;
3552     }
3553     // We cannot instantiate a StringRef as we do not have an appropriate string
3554     // of 0s at hand.
3555     return false;
3556   }
3557 
3558   // Start out with the entire array in the StringRef.
3559   Str = Slice.Array->getAsString();
3560   // Skip over 'offset' bytes.
3561   Str = Str.substr(Slice.Offset);
3562 
3563   if (TrimAtNul) {
3564     // Trim off the \0 and anything after it.  If the array is not nul
3565     // terminated, we just return the whole end of string.  The client may know
3566     // some other way that the string is length-bound.
3567     Str = Str.substr(0, Str.find('\0'));
3568   }
3569   return true;
3570 }
3571 
3572 // These next two are very similar to the above, but also look through PHI
3573 // nodes.
3574 // TODO: See if we can integrate these two together.
3575 
3576 /// If we can compute the length of the string pointed to by
3577 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3578 static uint64_t GetStringLengthH(const Value *V,
3579                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3580                                  unsigned CharSize) {
3581   // Look through noop bitcast instructions.
3582   V = V->stripPointerCasts();
3583 
3584   // If this is a PHI node, there are two cases: either we have already seen it
3585   // or we haven't.
3586   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3587     if (!PHIs.insert(PN).second)
3588       return ~0ULL;  // already in the set.
3589 
3590     // If it was new, see if all the input strings are the same length.
3591     uint64_t LenSoFar = ~0ULL;
3592     for (Value *IncValue : PN->incoming_values()) {
3593       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3594       if (Len == 0) return 0; // Unknown length -> unknown.
3595 
3596       if (Len == ~0ULL) continue;
3597 
3598       if (Len != LenSoFar && LenSoFar != ~0ULL)
3599         return 0;    // Disagree -> unknown.
3600       LenSoFar = Len;
3601     }
3602 
3603     // Success, all agree.
3604     return LenSoFar;
3605   }
3606 
3607   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3608   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3609     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3610     if (Len1 == 0) return 0;
3611     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3612     if (Len2 == 0) return 0;
3613     if (Len1 == ~0ULL) return Len2;
3614     if (Len2 == ~0ULL) return Len1;
3615     if (Len1 != Len2) return 0;
3616     return Len1;
3617   }
3618 
3619   // Otherwise, see if we can read the string.
3620   ConstantDataArraySlice Slice;
3621   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3622     return 0;
3623 
3624   if (Slice.Array == nullptr)
3625     return 1;
3626 
3627   // Search for nul characters
3628   unsigned NullIndex = 0;
3629   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3630     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3631       break;
3632   }
3633 
3634   return NullIndex + 1;
3635 }
3636 
3637 /// If we can compute the length of the string pointed to by
3638 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3639 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3640   if (!V->getType()->isPointerTy())
3641     return 0;
3642 
3643   SmallPtrSet<const PHINode*, 32> PHIs;
3644   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3645   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3646   // an empty string as a length.
3647   return Len == ~0ULL ? 1 : Len;
3648 }
3649 
3650 const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
3651   assert(Call &&
3652          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3653   if (const Value *RV = Call->getReturnedArgOperand())
3654     return RV;
3655   // This can be used only as a aliasing property.
3656   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
3657     return Call->getArgOperand(0);
3658   return nullptr;
3659 }
3660 
3661 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3662     const CallBase *Call) {
3663   return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3664          Call->getIntrinsicID() == Intrinsic::strip_invariant_group ||
3665          Call->getIntrinsicID() == Intrinsic::aarch64_irg;
3666 }
3667 
3668 /// \p PN defines a loop-variant pointer to an object.  Check if the
3669 /// previous iteration of the loop was referring to the same object as \p PN.
3670 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3671                                          const LoopInfo *LI) {
3672   // Find the loop-defined value.
3673   Loop *L = LI->getLoopFor(PN->getParent());
3674   if (PN->getNumIncomingValues() != 2)
3675     return true;
3676 
3677   // Find the value from previous iteration.
3678   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3679   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3680     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3681   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3682     return true;
3683 
3684   // If a new pointer is loaded in the loop, the pointer references a different
3685   // object in every iteration.  E.g.:
3686   //    for (i)
3687   //       int *p = a[i];
3688   //       ...
3689   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3690     if (!L->isLoopInvariant(Load->getPointerOperand()))
3691       return false;
3692   return true;
3693 }
3694 
3695 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3696                                  unsigned MaxLookup) {
3697   if (!V->getType()->isPointerTy())
3698     return V;
3699   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3700     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3701       V = GEP->getPointerOperand();
3702     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3703                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3704       V = cast<Operator>(V)->getOperand(0);
3705     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3706       if (GA->isInterposable())
3707         return V;
3708       V = GA->getAliasee();
3709     } else if (isa<AllocaInst>(V)) {
3710       // An alloca can't be further simplified.
3711       return V;
3712     } else {
3713       if (auto *Call = dyn_cast<CallBase>(V)) {
3714         // CaptureTracking can know about special capturing properties of some
3715         // intrinsics like launder.invariant.group, that can't be expressed with
3716         // the attributes, but have properties like returning aliasing pointer.
3717         // Because some analysis may assume that nocaptured pointer is not
3718         // returned from some special intrinsic (because function would have to
3719         // be marked with returns attribute), it is crucial to use this function
3720         // because it should be in sync with CaptureTracking. Not using it may
3721         // cause weird miscompilations where 2 aliasing pointers are assumed to
3722         // noalias.
3723         if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
3724           V = RP;
3725           continue;
3726         }
3727       }
3728 
3729       // See if InstructionSimplify knows any relevant tricks.
3730       if (Instruction *I = dyn_cast<Instruction>(V))
3731         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3732         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3733           V = Simplified;
3734           continue;
3735         }
3736 
3737       return V;
3738     }
3739     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3740   }
3741   return V;
3742 }
3743 
3744 void llvm::GetUnderlyingObjects(const Value *V,
3745                                 SmallVectorImpl<const Value *> &Objects,
3746                                 const DataLayout &DL, LoopInfo *LI,
3747                                 unsigned MaxLookup) {
3748   SmallPtrSet<const Value *, 4> Visited;
3749   SmallVector<const Value *, 4> Worklist;
3750   Worklist.push_back(V);
3751   do {
3752     const Value *P = Worklist.pop_back_val();
3753     P = GetUnderlyingObject(P, DL, MaxLookup);
3754 
3755     if (!Visited.insert(P).second)
3756       continue;
3757 
3758     if (auto *SI = dyn_cast<SelectInst>(P)) {
3759       Worklist.push_back(SI->getTrueValue());
3760       Worklist.push_back(SI->getFalseValue());
3761       continue;
3762     }
3763 
3764     if (auto *PN = dyn_cast<PHINode>(P)) {
3765       // If this PHI changes the underlying object in every iteration of the
3766       // loop, don't look through it.  Consider:
3767       //   int **A;
3768       //   for (i) {
3769       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3770       //     Curr = A[i];
3771       //     *Prev, *Curr;
3772       //
3773       // Prev is tracking Curr one iteration behind so they refer to different
3774       // underlying objects.
3775       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3776           isSameUnderlyingObjectInLoop(PN, LI))
3777         for (Value *IncValue : PN->incoming_values())
3778           Worklist.push_back(IncValue);
3779       continue;
3780     }
3781 
3782     Objects.push_back(P);
3783   } while (!Worklist.empty());
3784 }
3785 
3786 /// This is the function that does the work of looking through basic
3787 /// ptrtoint+arithmetic+inttoptr sequences.
3788 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3789   do {
3790     if (const Operator *U = dyn_cast<Operator>(V)) {
3791       // If we find a ptrtoint, we can transfer control back to the
3792       // regular getUnderlyingObjectFromInt.
3793       if (U->getOpcode() == Instruction::PtrToInt)
3794         return U->getOperand(0);
3795       // If we find an add of a constant, a multiplied value, or a phi, it's
3796       // likely that the other operand will lead us to the base
3797       // object. We don't have to worry about the case where the
3798       // object address is somehow being computed by the multiply,
3799       // because our callers only care when the result is an
3800       // identifiable object.
3801       if (U->getOpcode() != Instruction::Add ||
3802           (!isa<ConstantInt>(U->getOperand(1)) &&
3803            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3804            !isa<PHINode>(U->getOperand(1))))
3805         return V;
3806       V = U->getOperand(0);
3807     } else {
3808       return V;
3809     }
3810     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3811   } while (true);
3812 }
3813 
3814 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3815 /// ptrtoint+arithmetic+inttoptr sequences.
3816 /// It returns false if unidentified object is found in GetUnderlyingObjects.
3817 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3818                           SmallVectorImpl<Value *> &Objects,
3819                           const DataLayout &DL) {
3820   SmallPtrSet<const Value *, 16> Visited;
3821   SmallVector<const Value *, 4> Working(1, V);
3822   do {
3823     V = Working.pop_back_val();
3824 
3825     SmallVector<const Value *, 4> Objs;
3826     GetUnderlyingObjects(V, Objs, DL);
3827 
3828     for (const Value *V : Objs) {
3829       if (!Visited.insert(V).second)
3830         continue;
3831       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3832         const Value *O =
3833           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3834         if (O->getType()->isPointerTy()) {
3835           Working.push_back(O);
3836           continue;
3837         }
3838       }
3839       // If GetUnderlyingObjects fails to find an identifiable object,
3840       // getUnderlyingObjectsForCodeGen also fails for safety.
3841       if (!isIdentifiedObject(V)) {
3842         Objects.clear();
3843         return false;
3844       }
3845       Objects.push_back(const_cast<Value *>(V));
3846     }
3847   } while (!Working.empty());
3848   return true;
3849 }
3850 
3851 /// Return true if the only users of this pointer are lifetime markers.
3852 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3853   for (const User *U : V->users()) {
3854     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3855     if (!II) return false;
3856 
3857     if (!II->isLifetimeStartOrEnd())
3858       return false;
3859   }
3860   return true;
3861 }
3862 
3863 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3864                                         const Instruction *CtxI,
3865                                         const DominatorTree *DT) {
3866   const Operator *Inst = dyn_cast<Operator>(V);
3867   if (!Inst)
3868     return false;
3869 
3870   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3871     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3872       if (C->canTrap())
3873         return false;
3874 
3875   switch (Inst->getOpcode()) {
3876   default:
3877     return true;
3878   case Instruction::UDiv:
3879   case Instruction::URem: {
3880     // x / y is undefined if y == 0.
3881     const APInt *V;
3882     if (match(Inst->getOperand(1), m_APInt(V)))
3883       return *V != 0;
3884     return false;
3885   }
3886   case Instruction::SDiv:
3887   case Instruction::SRem: {
3888     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3889     const APInt *Numerator, *Denominator;
3890     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3891       return false;
3892     // We cannot hoist this division if the denominator is 0.
3893     if (*Denominator == 0)
3894       return false;
3895     // It's safe to hoist if the denominator is not 0 or -1.
3896     if (*Denominator != -1)
3897       return true;
3898     // At this point we know that the denominator is -1.  It is safe to hoist as
3899     // long we know that the numerator is not INT_MIN.
3900     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3901       return !Numerator->isMinSignedValue();
3902     // The numerator *might* be MinSignedValue.
3903     return false;
3904   }
3905   case Instruction::Load: {
3906     const LoadInst *LI = cast<LoadInst>(Inst);
3907     if (!LI->isUnordered() ||
3908         // Speculative load may create a race that did not exist in the source.
3909         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3910         // Speculative load may load data from dirty regions.
3911         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3912         LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3913       return false;
3914     const DataLayout &DL = LI->getModule()->getDataLayout();
3915     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3916                                               LI->getType(), LI->getAlignment(),
3917                                               DL, CtxI, DT);
3918   }
3919   case Instruction::Call: {
3920     auto *CI = cast<const CallInst>(Inst);
3921     const Function *Callee = CI->getCalledFunction();
3922 
3923     // The called function could have undefined behavior or side-effects, even
3924     // if marked readnone nounwind.
3925     return Callee && Callee->isSpeculatable();
3926   }
3927   case Instruction::VAArg:
3928   case Instruction::Alloca:
3929   case Instruction::Invoke:
3930   case Instruction::CallBr:
3931   case Instruction::PHI:
3932   case Instruction::Store:
3933   case Instruction::Ret:
3934   case Instruction::Br:
3935   case Instruction::IndirectBr:
3936   case Instruction::Switch:
3937   case Instruction::Unreachable:
3938   case Instruction::Fence:
3939   case Instruction::AtomicRMW:
3940   case Instruction::AtomicCmpXchg:
3941   case Instruction::LandingPad:
3942   case Instruction::Resume:
3943   case Instruction::CatchSwitch:
3944   case Instruction::CatchPad:
3945   case Instruction::CatchRet:
3946   case Instruction::CleanupPad:
3947   case Instruction::CleanupRet:
3948     return false; // Misc instructions which have effects
3949   }
3950 }
3951 
3952 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3953   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3954 }
3955 
3956 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
3957 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
3958   switch (OR) {
3959     case ConstantRange::OverflowResult::MayOverflow:
3960       return OverflowResult::MayOverflow;
3961     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
3962       return OverflowResult::AlwaysOverflowsLow;
3963     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
3964       return OverflowResult::AlwaysOverflowsHigh;
3965     case ConstantRange::OverflowResult::NeverOverflows:
3966       return OverflowResult::NeverOverflows;
3967   }
3968   llvm_unreachable("Unknown OverflowResult");
3969 }
3970 
3971 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
3972 static ConstantRange computeConstantRangeIncludingKnownBits(
3973     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
3974     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3975     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
3976   KnownBits Known = computeKnownBits(
3977       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
3978   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
3979   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
3980   ConstantRange::PreferredRangeType RangeType =
3981       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
3982   return CR1.intersectWith(CR2, RangeType);
3983 }
3984 
3985 OverflowResult llvm::computeOverflowForUnsignedMul(
3986     const Value *LHS, const Value *RHS, const DataLayout &DL,
3987     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3988     bool UseInstrInfo) {
3989   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
3990                                         nullptr, UseInstrInfo);
3991   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
3992                                         nullptr, UseInstrInfo);
3993   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
3994   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
3995   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
3996 }
3997 
3998 OverflowResult
3999 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4000                                   const DataLayout &DL, AssumptionCache *AC,
4001                                   const Instruction *CxtI,
4002                                   const DominatorTree *DT, bool UseInstrInfo) {
4003   // Multiplying n * m significant bits yields a result of n + m significant
4004   // bits. If the total number of significant bits does not exceed the
4005   // result bit width (minus 1), there is no overflow.
4006   // This means if we have enough leading sign bits in the operands
4007   // we can guarantee that the result does not overflow.
4008   // Ref: "Hacker's Delight" by Henry Warren
4009   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4010 
4011   // Note that underestimating the number of sign bits gives a more
4012   // conservative answer.
4013   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4014                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4015 
4016   // First handle the easy case: if we have enough sign bits there's
4017   // definitely no overflow.
4018   if (SignBits > BitWidth + 1)
4019     return OverflowResult::NeverOverflows;
4020 
4021   // There are two ambiguous cases where there can be no overflow:
4022   //   SignBits == BitWidth + 1    and
4023   //   SignBits == BitWidth
4024   // The second case is difficult to check, therefore we only handle the
4025   // first case.
4026   if (SignBits == BitWidth + 1) {
4027     // It overflows only when both arguments are negative and the true
4028     // product is exactly the minimum negative number.
4029     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4030     // For simplicity we just check if at least one side is not negative.
4031     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4032                                           nullptr, UseInstrInfo);
4033     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4034                                           nullptr, UseInstrInfo);
4035     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4036       return OverflowResult::NeverOverflows;
4037   }
4038   return OverflowResult::MayOverflow;
4039 }
4040 
4041 OverflowResult llvm::computeOverflowForUnsignedAdd(
4042     const Value *LHS, const Value *RHS, const DataLayout &DL,
4043     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4044     bool UseInstrInfo) {
4045   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4046       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4047       nullptr, UseInstrInfo);
4048   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4049       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4050       nullptr, UseInstrInfo);
4051   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4052 }
4053 
4054 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4055                                                   const Value *RHS,
4056                                                   const AddOperator *Add,
4057                                                   const DataLayout &DL,
4058                                                   AssumptionCache *AC,
4059                                                   const Instruction *CxtI,
4060                                                   const DominatorTree *DT) {
4061   if (Add && Add->hasNoSignedWrap()) {
4062     return OverflowResult::NeverOverflows;
4063   }
4064 
4065   // If LHS and RHS each have at least two sign bits, the addition will look
4066   // like
4067   //
4068   // XX..... +
4069   // YY.....
4070   //
4071   // If the carry into the most significant position is 0, X and Y can't both
4072   // be 1 and therefore the carry out of the addition is also 0.
4073   //
4074   // If the carry into the most significant position is 1, X and Y can't both
4075   // be 0 and therefore the carry out of the addition is also 1.
4076   //
4077   // Since the carry into the most significant position is always equal to
4078   // the carry out of the addition, there is no signed overflow.
4079   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4080       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4081     return OverflowResult::NeverOverflows;
4082 
4083   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4084       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4085   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4086       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4087   OverflowResult OR =
4088       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4089   if (OR != OverflowResult::MayOverflow)
4090     return OR;
4091 
4092   // The remaining code needs Add to be available. Early returns if not so.
4093   if (!Add)
4094     return OverflowResult::MayOverflow;
4095 
4096   // If the sign of Add is the same as at least one of the operands, this add
4097   // CANNOT overflow. If this can be determined from the known bits of the
4098   // operands the above signedAddMayOverflow() check will have already done so.
4099   // The only other way to improve on the known bits is from an assumption, so
4100   // call computeKnownBitsFromAssume() directly.
4101   bool LHSOrRHSKnownNonNegative =
4102       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4103   bool LHSOrRHSKnownNegative =
4104       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4105   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4106     KnownBits AddKnown(LHSRange.getBitWidth());
4107     computeKnownBitsFromAssume(
4108         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4109     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4110         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4111       return OverflowResult::NeverOverflows;
4112   }
4113 
4114   return OverflowResult::MayOverflow;
4115 }
4116 
4117 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4118                                                    const Value *RHS,
4119                                                    const DataLayout &DL,
4120                                                    AssumptionCache *AC,
4121                                                    const Instruction *CxtI,
4122                                                    const DominatorTree *DT) {
4123   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4124       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4125   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4126       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4127   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4128 }
4129 
4130 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4131                                                  const Value *RHS,
4132                                                  const DataLayout &DL,
4133                                                  AssumptionCache *AC,
4134                                                  const Instruction *CxtI,
4135                                                  const DominatorTree *DT) {
4136   // If LHS and RHS each have at least two sign bits, the subtraction
4137   // cannot overflow.
4138   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4139       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4140     return OverflowResult::NeverOverflows;
4141 
4142   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4143       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4144   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4145       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4146   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4147 }
4148 
4149 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4150                                      const DominatorTree &DT) {
4151   SmallVector<const BranchInst *, 2> GuardingBranches;
4152   SmallVector<const ExtractValueInst *, 2> Results;
4153 
4154   for (const User *U : WO->users()) {
4155     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4156       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4157 
4158       if (EVI->getIndices()[0] == 0)
4159         Results.push_back(EVI);
4160       else {
4161         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4162 
4163         for (const auto *U : EVI->users())
4164           if (const auto *B = dyn_cast<BranchInst>(U)) {
4165             assert(B->isConditional() && "How else is it using an i1?");
4166             GuardingBranches.push_back(B);
4167           }
4168       }
4169     } else {
4170       // We are using the aggregate directly in a way we don't want to analyze
4171       // here (storing it to a global, say).
4172       return false;
4173     }
4174   }
4175 
4176   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4177     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4178     if (!NoWrapEdge.isSingleEdge())
4179       return false;
4180 
4181     // Check if all users of the add are provably no-wrap.
4182     for (const auto *Result : Results) {
4183       // If the extractvalue itself is not executed on overflow, the we don't
4184       // need to check each use separately, since domination is transitive.
4185       if (DT.dominates(NoWrapEdge, Result->getParent()))
4186         continue;
4187 
4188       for (auto &RU : Result->uses())
4189         if (!DT.dominates(NoWrapEdge, RU))
4190           return false;
4191     }
4192 
4193     return true;
4194   };
4195 
4196   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4197 }
4198 
4199 
4200 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4201                                                  const DataLayout &DL,
4202                                                  AssumptionCache *AC,
4203                                                  const Instruction *CxtI,
4204                                                  const DominatorTree *DT) {
4205   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4206                                        Add, DL, AC, CxtI, DT);
4207 }
4208 
4209 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4210                                                  const Value *RHS,
4211                                                  const DataLayout &DL,
4212                                                  AssumptionCache *AC,
4213                                                  const Instruction *CxtI,
4214                                                  const DominatorTree *DT) {
4215   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4216 }
4217 
4218 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4219   // A memory operation returns normally if it isn't volatile. A volatile
4220   // operation is allowed to trap.
4221   //
4222   // An atomic operation isn't guaranteed to return in a reasonable amount of
4223   // time because it's possible for another thread to interfere with it for an
4224   // arbitrary length of time, but programs aren't allowed to rely on that.
4225   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4226     return !LI->isVolatile();
4227   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4228     return !SI->isVolatile();
4229   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4230     return !CXI->isVolatile();
4231   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4232     return !RMWI->isVolatile();
4233   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4234     return !MII->isVolatile();
4235 
4236   // If there is no successor, then execution can't transfer to it.
4237   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4238     return !CRI->unwindsToCaller();
4239   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4240     return !CatchSwitch->unwindsToCaller();
4241   if (isa<ResumeInst>(I))
4242     return false;
4243   if (isa<ReturnInst>(I))
4244     return false;
4245   if (isa<UnreachableInst>(I))
4246     return false;
4247 
4248   // Calls can throw, or contain an infinite loop, or kill the process.
4249   if (auto CS = ImmutableCallSite(I)) {
4250     // Call sites that throw have implicit non-local control flow.
4251     if (!CS.doesNotThrow())
4252       return false;
4253 
4254     // A function which doens't throw and has "willreturn" attribute will
4255     // always return.
4256     if (CS.hasFnAttr(Attribute::WillReturn))
4257       return true;
4258 
4259     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4260     // etc. and thus not return.  However, LLVM already assumes that
4261     //
4262     //  - Thread exiting actions are modeled as writes to memory invisible to
4263     //    the program.
4264     //
4265     //  - Loops that don't have side effects (side effects are volatile/atomic
4266     //    stores and IO) always terminate (see http://llvm.org/PR965).
4267     //    Furthermore IO itself is also modeled as writes to memory invisible to
4268     //    the program.
4269     //
4270     // We rely on those assumptions here, and use the memory effects of the call
4271     // target as a proxy for checking that it always returns.
4272 
4273     // FIXME: This isn't aggressive enough; a call which only writes to a global
4274     // is guaranteed to return.
4275     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4276            match(I, m_Intrinsic<Intrinsic::assume>()) ||
4277            match(I, m_Intrinsic<Intrinsic::sideeffect>()) ||
4278            match(I, m_Intrinsic<Intrinsic::experimental_widenable_condition>());
4279   }
4280 
4281   // Other instructions return normally.
4282   return true;
4283 }
4284 
4285 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4286   // TODO: This is slightly conservative for invoke instruction since exiting
4287   // via an exception *is* normal control for them.
4288   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4289     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4290       return false;
4291   return true;
4292 }
4293 
4294 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4295                                                   const Loop *L) {
4296   // The loop header is guaranteed to be executed for every iteration.
4297   //
4298   // FIXME: Relax this constraint to cover all basic blocks that are
4299   // guaranteed to be executed at every iteration.
4300   if (I->getParent() != L->getHeader()) return false;
4301 
4302   for (const Instruction &LI : *L->getHeader()) {
4303     if (&LI == I) return true;
4304     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4305   }
4306   llvm_unreachable("Instruction not contained in its own parent basic block.");
4307 }
4308 
4309 bool llvm::propagatesFullPoison(const Instruction *I) {
4310   // TODO: This should include all instructions apart from phis, selects and
4311   // call-like instructions.
4312   switch (I->getOpcode()) {
4313   case Instruction::Add:
4314   case Instruction::Sub:
4315   case Instruction::Xor:
4316   case Instruction::Trunc:
4317   case Instruction::BitCast:
4318   case Instruction::AddrSpaceCast:
4319   case Instruction::Mul:
4320   case Instruction::Shl:
4321   case Instruction::GetElementPtr:
4322     // These operations all propagate poison unconditionally. Note that poison
4323     // is not any particular value, so xor or subtraction of poison with
4324     // itself still yields poison, not zero.
4325     return true;
4326 
4327   case Instruction::AShr:
4328   case Instruction::SExt:
4329     // For these operations, one bit of the input is replicated across
4330     // multiple output bits. A replicated poison bit is still poison.
4331     return true;
4332 
4333   case Instruction::ICmp:
4334     // Comparing poison with any value yields poison.  This is why, for
4335     // instance, x s< (x +nsw 1) can be folded to true.
4336     return true;
4337 
4338   default:
4339     return false;
4340   }
4341 }
4342 
4343 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4344   switch (I->getOpcode()) {
4345     case Instruction::Store:
4346       return cast<StoreInst>(I)->getPointerOperand();
4347 
4348     case Instruction::Load:
4349       return cast<LoadInst>(I)->getPointerOperand();
4350 
4351     case Instruction::AtomicCmpXchg:
4352       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4353 
4354     case Instruction::AtomicRMW:
4355       return cast<AtomicRMWInst>(I)->getPointerOperand();
4356 
4357     case Instruction::UDiv:
4358     case Instruction::SDiv:
4359     case Instruction::URem:
4360     case Instruction::SRem:
4361       return I->getOperand(1);
4362 
4363     default:
4364       // Note: It's really tempting to think that a conditional branch or
4365       // switch should be listed here, but that's incorrect.  It's not
4366       // branching off of poison which is UB, it is executing a side effecting
4367       // instruction which follows the branch.
4368       return nullptr;
4369   }
4370 }
4371 
4372 bool llvm::mustTriggerUB(const Instruction *I,
4373                          const SmallSet<const Value *, 16>& KnownPoison) {
4374   auto *NotPoison = getGuaranteedNonFullPoisonOp(I);
4375   return (NotPoison && KnownPoison.count(NotPoison));
4376 }
4377 
4378 
4379 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4380   // We currently only look for uses of poison values within the same basic
4381   // block, as that makes it easier to guarantee that the uses will be
4382   // executed given that PoisonI is executed.
4383   //
4384   // FIXME: Expand this to consider uses beyond the same basic block. To do
4385   // this, look out for the distinction between post-dominance and strong
4386   // post-dominance.
4387   const BasicBlock *BB = PoisonI->getParent();
4388 
4389   // Set of instructions that we have proved will yield poison if PoisonI
4390   // does.
4391   SmallSet<const Value *, 16> YieldsPoison;
4392   SmallSet<const BasicBlock *, 4> Visited;
4393   YieldsPoison.insert(PoisonI);
4394   Visited.insert(PoisonI->getParent());
4395 
4396   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4397 
4398   unsigned Iter = 0;
4399   while (Iter++ < MaxDepth) {
4400     for (auto &I : make_range(Begin, End)) {
4401       if (&I != PoisonI) {
4402         if (mustTriggerUB(&I, YieldsPoison))
4403           return true;
4404         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4405           return false;
4406       }
4407 
4408       // Mark poison that propagates from I through uses of I.
4409       if (YieldsPoison.count(&I)) {
4410         for (const User *User : I.users()) {
4411           const Instruction *UserI = cast<Instruction>(User);
4412           if (propagatesFullPoison(UserI))
4413             YieldsPoison.insert(User);
4414         }
4415       }
4416     }
4417 
4418     if (auto *NextBB = BB->getSingleSuccessor()) {
4419       if (Visited.insert(NextBB).second) {
4420         BB = NextBB;
4421         Begin = BB->getFirstNonPHI()->getIterator();
4422         End = BB->end();
4423         continue;
4424       }
4425     }
4426 
4427     break;
4428   }
4429   return false;
4430 }
4431 
4432 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4433   if (FMF.noNaNs())
4434     return true;
4435 
4436   if (auto *C = dyn_cast<ConstantFP>(V))
4437     return !C->isNaN();
4438 
4439   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4440     if (!C->getElementType()->isFloatingPointTy())
4441       return false;
4442     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4443       if (C->getElementAsAPFloat(I).isNaN())
4444         return false;
4445     }
4446     return true;
4447   }
4448 
4449   return false;
4450 }
4451 
4452 static bool isKnownNonZero(const Value *V) {
4453   if (auto *C = dyn_cast<ConstantFP>(V))
4454     return !C->isZero();
4455 
4456   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4457     if (!C->getElementType()->isFloatingPointTy())
4458       return false;
4459     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4460       if (C->getElementAsAPFloat(I).isZero())
4461         return false;
4462     }
4463     return true;
4464   }
4465 
4466   return false;
4467 }
4468 
4469 /// Match clamp pattern for float types without care about NaNs or signed zeros.
4470 /// Given non-min/max outer cmp/select from the clamp pattern this
4471 /// function recognizes if it can be substitued by a "canonical" min/max
4472 /// pattern.
4473 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4474                                                Value *CmpLHS, Value *CmpRHS,
4475                                                Value *TrueVal, Value *FalseVal,
4476                                                Value *&LHS, Value *&RHS) {
4477   // Try to match
4478   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4479   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4480   // and return description of the outer Max/Min.
4481 
4482   // First, check if select has inverse order:
4483   if (CmpRHS == FalseVal) {
4484     std::swap(TrueVal, FalseVal);
4485     Pred = CmpInst::getInversePredicate(Pred);
4486   }
4487 
4488   // Assume success now. If there's no match, callers should not use these anyway.
4489   LHS = TrueVal;
4490   RHS = FalseVal;
4491 
4492   const APFloat *FC1;
4493   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4494     return {SPF_UNKNOWN, SPNB_NA, false};
4495 
4496   const APFloat *FC2;
4497   switch (Pred) {
4498   case CmpInst::FCMP_OLT:
4499   case CmpInst::FCMP_OLE:
4500   case CmpInst::FCMP_ULT:
4501   case CmpInst::FCMP_ULE:
4502     if (match(FalseVal,
4503               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4504                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4505         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4506       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4507     break;
4508   case CmpInst::FCMP_OGT:
4509   case CmpInst::FCMP_OGE:
4510   case CmpInst::FCMP_UGT:
4511   case CmpInst::FCMP_UGE:
4512     if (match(FalseVal,
4513               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4514                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4515         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4516       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4517     break;
4518   default:
4519     break;
4520   }
4521 
4522   return {SPF_UNKNOWN, SPNB_NA, false};
4523 }
4524 
4525 /// Recognize variations of:
4526 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4527 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4528                                       Value *CmpLHS, Value *CmpRHS,
4529                                       Value *TrueVal, Value *FalseVal) {
4530   // Swap the select operands and predicate to match the patterns below.
4531   if (CmpRHS != TrueVal) {
4532     Pred = ICmpInst::getSwappedPredicate(Pred);
4533     std::swap(TrueVal, FalseVal);
4534   }
4535   const APInt *C1;
4536   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4537     const APInt *C2;
4538     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4539     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4540         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4541       return {SPF_SMAX, SPNB_NA, false};
4542 
4543     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4544     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4545         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4546       return {SPF_SMIN, SPNB_NA, false};
4547 
4548     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4549     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4550         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4551       return {SPF_UMAX, SPNB_NA, false};
4552 
4553     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4554     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4555         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4556       return {SPF_UMIN, SPNB_NA, false};
4557   }
4558   return {SPF_UNKNOWN, SPNB_NA, false};
4559 }
4560 
4561 /// Recognize variations of:
4562 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4563 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4564                                                Value *CmpLHS, Value *CmpRHS,
4565                                                Value *TVal, Value *FVal,
4566                                                unsigned Depth) {
4567   // TODO: Allow FP min/max with nnan/nsz.
4568   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4569 
4570   Value *A, *B;
4571   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4572   if (!SelectPatternResult::isMinOrMax(L.Flavor))
4573     return {SPF_UNKNOWN, SPNB_NA, false};
4574 
4575   Value *C, *D;
4576   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4577   if (L.Flavor != R.Flavor)
4578     return {SPF_UNKNOWN, SPNB_NA, false};
4579 
4580   // We have something like: x Pred y ? min(a, b) : min(c, d).
4581   // Try to match the compare to the min/max operations of the select operands.
4582   // First, make sure we have the right compare predicate.
4583   switch (L.Flavor) {
4584   case SPF_SMIN:
4585     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4586       Pred = ICmpInst::getSwappedPredicate(Pred);
4587       std::swap(CmpLHS, CmpRHS);
4588     }
4589     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4590       break;
4591     return {SPF_UNKNOWN, SPNB_NA, false};
4592   case SPF_SMAX:
4593     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4594       Pred = ICmpInst::getSwappedPredicate(Pred);
4595       std::swap(CmpLHS, CmpRHS);
4596     }
4597     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4598       break;
4599     return {SPF_UNKNOWN, SPNB_NA, false};
4600   case SPF_UMIN:
4601     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4602       Pred = ICmpInst::getSwappedPredicate(Pred);
4603       std::swap(CmpLHS, CmpRHS);
4604     }
4605     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4606       break;
4607     return {SPF_UNKNOWN, SPNB_NA, false};
4608   case SPF_UMAX:
4609     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4610       Pred = ICmpInst::getSwappedPredicate(Pred);
4611       std::swap(CmpLHS, CmpRHS);
4612     }
4613     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4614       break;
4615     return {SPF_UNKNOWN, SPNB_NA, false};
4616   default:
4617     return {SPF_UNKNOWN, SPNB_NA, false};
4618   }
4619 
4620   // If there is a common operand in the already matched min/max and the other
4621   // min/max operands match the compare operands (either directly or inverted),
4622   // then this is min/max of the same flavor.
4623 
4624   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4625   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4626   if (D == B) {
4627     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4628                                          match(A, m_Not(m_Specific(CmpRHS)))))
4629       return {L.Flavor, SPNB_NA, false};
4630   }
4631   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4632   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4633   if (C == B) {
4634     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4635                                          match(A, m_Not(m_Specific(CmpRHS)))))
4636       return {L.Flavor, SPNB_NA, false};
4637   }
4638   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4639   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4640   if (D == A) {
4641     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4642                                          match(B, m_Not(m_Specific(CmpRHS)))))
4643       return {L.Flavor, SPNB_NA, false};
4644   }
4645   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4646   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4647   if (C == A) {
4648     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4649                                          match(B, m_Not(m_Specific(CmpRHS)))))
4650       return {L.Flavor, SPNB_NA, false};
4651   }
4652 
4653   return {SPF_UNKNOWN, SPNB_NA, false};
4654 }
4655 
4656 /// Match non-obvious integer minimum and maximum sequences.
4657 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4658                                        Value *CmpLHS, Value *CmpRHS,
4659                                        Value *TrueVal, Value *FalseVal,
4660                                        Value *&LHS, Value *&RHS,
4661                                        unsigned Depth) {
4662   // Assume success. If there's no match, callers should not use these anyway.
4663   LHS = TrueVal;
4664   RHS = FalseVal;
4665 
4666   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4667   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4668     return SPR;
4669 
4670   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4671   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4672     return SPR;
4673 
4674   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4675     return {SPF_UNKNOWN, SPNB_NA, false};
4676 
4677   // Z = X -nsw Y
4678   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4679   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4680   if (match(TrueVal, m_Zero()) &&
4681       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4682     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4683 
4684   // Z = X -nsw Y
4685   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4686   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4687   if (match(FalseVal, m_Zero()) &&
4688       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4689     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4690 
4691   const APInt *C1;
4692   if (!match(CmpRHS, m_APInt(C1)))
4693     return {SPF_UNKNOWN, SPNB_NA, false};
4694 
4695   // An unsigned min/max can be written with a signed compare.
4696   const APInt *C2;
4697   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4698       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4699     // Is the sign bit set?
4700     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4701     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4702     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4703         C2->isMaxSignedValue())
4704       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4705 
4706     // Is the sign bit clear?
4707     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4708     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4709     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4710         C2->isMinSignedValue())
4711       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4712   }
4713 
4714   // Look through 'not' ops to find disguised signed min/max.
4715   // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4716   // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4717   if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4718       match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4719     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4720 
4721   // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4722   // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4723   if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4724       match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4725     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4726 
4727   return {SPF_UNKNOWN, SPNB_NA, false};
4728 }
4729 
4730 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
4731   assert(X && Y && "Invalid operand");
4732 
4733   // X = sub (0, Y) || X = sub nsw (0, Y)
4734   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
4735       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
4736     return true;
4737 
4738   // Y = sub (0, X) || Y = sub nsw (0, X)
4739   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
4740       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
4741     return true;
4742 
4743   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
4744   Value *A, *B;
4745   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
4746                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
4747          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
4748                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
4749 }
4750 
4751 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4752                                               FastMathFlags FMF,
4753                                               Value *CmpLHS, Value *CmpRHS,
4754                                               Value *TrueVal, Value *FalseVal,
4755                                               Value *&LHS, Value *&RHS,
4756                                               unsigned Depth) {
4757   if (CmpInst::isFPPredicate(Pred)) {
4758     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
4759     // 0.0 operand, set the compare's 0.0 operands to that same value for the
4760     // purpose of identifying min/max. Disregard vector constants with undefined
4761     // elements because those can not be back-propagated for analysis.
4762     Value *OutputZeroVal = nullptr;
4763     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
4764         !cast<Constant>(TrueVal)->containsUndefElement())
4765       OutputZeroVal = TrueVal;
4766     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
4767              !cast<Constant>(FalseVal)->containsUndefElement())
4768       OutputZeroVal = FalseVal;
4769 
4770     if (OutputZeroVal) {
4771       if (match(CmpLHS, m_AnyZeroFP()))
4772         CmpLHS = OutputZeroVal;
4773       if (match(CmpRHS, m_AnyZeroFP()))
4774         CmpRHS = OutputZeroVal;
4775     }
4776   }
4777 
4778   LHS = CmpLHS;
4779   RHS = CmpRHS;
4780 
4781   // Signed zero may return inconsistent results between implementations.
4782   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4783   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4784   // Therefore, we behave conservatively and only proceed if at least one of the
4785   // operands is known to not be zero or if we don't care about signed zero.
4786   switch (Pred) {
4787   default: break;
4788   // FIXME: Include OGT/OLT/UGT/ULT.
4789   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4790   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4791     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4792         !isKnownNonZero(CmpRHS))
4793       return {SPF_UNKNOWN, SPNB_NA, false};
4794   }
4795 
4796   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4797   bool Ordered = false;
4798 
4799   // When given one NaN and one non-NaN input:
4800   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4801   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4802   //     ordered comparison fails), which could be NaN or non-NaN.
4803   // so here we discover exactly what NaN behavior is required/accepted.
4804   if (CmpInst::isFPPredicate(Pred)) {
4805     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4806     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4807 
4808     if (LHSSafe && RHSSafe) {
4809       // Both operands are known non-NaN.
4810       NaNBehavior = SPNB_RETURNS_ANY;
4811     } else if (CmpInst::isOrdered(Pred)) {
4812       // An ordered comparison will return false when given a NaN, so it
4813       // returns the RHS.
4814       Ordered = true;
4815       if (LHSSafe)
4816         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4817         NaNBehavior = SPNB_RETURNS_NAN;
4818       else if (RHSSafe)
4819         NaNBehavior = SPNB_RETURNS_OTHER;
4820       else
4821         // Completely unsafe.
4822         return {SPF_UNKNOWN, SPNB_NA, false};
4823     } else {
4824       Ordered = false;
4825       // An unordered comparison will return true when given a NaN, so it
4826       // returns the LHS.
4827       if (LHSSafe)
4828         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4829         NaNBehavior = SPNB_RETURNS_OTHER;
4830       else if (RHSSafe)
4831         NaNBehavior = SPNB_RETURNS_NAN;
4832       else
4833         // Completely unsafe.
4834         return {SPF_UNKNOWN, SPNB_NA, false};
4835     }
4836   }
4837 
4838   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4839     std::swap(CmpLHS, CmpRHS);
4840     Pred = CmpInst::getSwappedPredicate(Pred);
4841     if (NaNBehavior == SPNB_RETURNS_NAN)
4842       NaNBehavior = SPNB_RETURNS_OTHER;
4843     else if (NaNBehavior == SPNB_RETURNS_OTHER)
4844       NaNBehavior = SPNB_RETURNS_NAN;
4845     Ordered = !Ordered;
4846   }
4847 
4848   // ([if]cmp X, Y) ? X : Y
4849   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4850     switch (Pred) {
4851     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4852     case ICmpInst::ICMP_UGT:
4853     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4854     case ICmpInst::ICMP_SGT:
4855     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4856     case ICmpInst::ICMP_ULT:
4857     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4858     case ICmpInst::ICMP_SLT:
4859     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4860     case FCmpInst::FCMP_UGT:
4861     case FCmpInst::FCMP_UGE:
4862     case FCmpInst::FCMP_OGT:
4863     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4864     case FCmpInst::FCMP_ULT:
4865     case FCmpInst::FCMP_ULE:
4866     case FCmpInst::FCMP_OLT:
4867     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4868     }
4869   }
4870 
4871   if (isKnownNegation(TrueVal, FalseVal)) {
4872     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
4873     // match against either LHS or sext(LHS).
4874     auto MaybeSExtCmpLHS =
4875         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
4876     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
4877     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
4878     if (match(TrueVal, MaybeSExtCmpLHS)) {
4879       // Set the return values. If the compare uses the negated value (-X >s 0),
4880       // swap the return values because the negated value is always 'RHS'.
4881       LHS = TrueVal;
4882       RHS = FalseVal;
4883       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
4884         std::swap(LHS, RHS);
4885 
4886       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
4887       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
4888       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4889         return {SPF_ABS, SPNB_NA, false};
4890 
4891       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
4892       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
4893         return {SPF_ABS, SPNB_NA, false};
4894 
4895       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
4896       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
4897       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4898         return {SPF_NABS, SPNB_NA, false};
4899     }
4900     else if (match(FalseVal, MaybeSExtCmpLHS)) {
4901       // Set the return values. If the compare uses the negated value (-X >s 0),
4902       // swap the return values because the negated value is always 'RHS'.
4903       LHS = FalseVal;
4904       RHS = TrueVal;
4905       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
4906         std::swap(LHS, RHS);
4907 
4908       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
4909       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
4910       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4911         return {SPF_NABS, SPNB_NA, false};
4912 
4913       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
4914       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
4915       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4916         return {SPF_ABS, SPNB_NA, false};
4917     }
4918   }
4919 
4920   if (CmpInst::isIntPredicate(Pred))
4921     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4922 
4923   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4924   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
4925   // semantics than minNum. Be conservative in such case.
4926   if (NaNBehavior != SPNB_RETURNS_ANY ||
4927       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4928        !isKnownNonZero(CmpRHS)))
4929     return {SPF_UNKNOWN, SPNB_NA, false};
4930 
4931   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4932 }
4933 
4934 /// Helps to match a select pattern in case of a type mismatch.
4935 ///
4936 /// The function processes the case when type of true and false values of a
4937 /// select instruction differs from type of the cmp instruction operands because
4938 /// of a cast instruction. The function checks if it is legal to move the cast
4939 /// operation after "select". If yes, it returns the new second value of
4940 /// "select" (with the assumption that cast is moved):
4941 /// 1. As operand of cast instruction when both values of "select" are same cast
4942 /// instructions.
4943 /// 2. As restored constant (by applying reverse cast operation) when the first
4944 /// value of the "select" is a cast operation and the second value is a
4945 /// constant.
4946 /// NOTE: We return only the new second value because the first value could be
4947 /// accessed as operand of cast instruction.
4948 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4949                               Instruction::CastOps *CastOp) {
4950   auto *Cast1 = dyn_cast<CastInst>(V1);
4951   if (!Cast1)
4952     return nullptr;
4953 
4954   *CastOp = Cast1->getOpcode();
4955   Type *SrcTy = Cast1->getSrcTy();
4956   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4957     // If V1 and V2 are both the same cast from the same type, look through V1.
4958     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4959       return Cast2->getOperand(0);
4960     return nullptr;
4961   }
4962 
4963   auto *C = dyn_cast<Constant>(V2);
4964   if (!C)
4965     return nullptr;
4966 
4967   Constant *CastedTo = nullptr;
4968   switch (*CastOp) {
4969   case Instruction::ZExt:
4970     if (CmpI->isUnsigned())
4971       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4972     break;
4973   case Instruction::SExt:
4974     if (CmpI->isSigned())
4975       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4976     break;
4977   case Instruction::Trunc:
4978     Constant *CmpConst;
4979     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4980         CmpConst->getType() == SrcTy) {
4981       // Here we have the following case:
4982       //
4983       //   %cond = cmp iN %x, CmpConst
4984       //   %tr = trunc iN %x to iK
4985       //   %narrowsel = select i1 %cond, iK %t, iK C
4986       //
4987       // We can always move trunc after select operation:
4988       //
4989       //   %cond = cmp iN %x, CmpConst
4990       //   %widesel = select i1 %cond, iN %x, iN CmpConst
4991       //   %tr = trunc iN %widesel to iK
4992       //
4993       // Note that C could be extended in any way because we don't care about
4994       // upper bits after truncation. It can't be abs pattern, because it would
4995       // look like:
4996       //
4997       //   select i1 %cond, x, -x.
4998       //
4999       // So only min/max pattern could be matched. Such match requires widened C
5000       // == CmpConst. That is why set widened C = CmpConst, condition trunc
5001       // CmpConst == C is checked below.
5002       CastedTo = CmpConst;
5003     } else {
5004       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5005     }
5006     break;
5007   case Instruction::FPTrunc:
5008     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5009     break;
5010   case Instruction::FPExt:
5011     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5012     break;
5013   case Instruction::FPToUI:
5014     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5015     break;
5016   case Instruction::FPToSI:
5017     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5018     break;
5019   case Instruction::UIToFP:
5020     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5021     break;
5022   case Instruction::SIToFP:
5023     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5024     break;
5025   default:
5026     break;
5027   }
5028 
5029   if (!CastedTo)
5030     return nullptr;
5031 
5032   // Make sure the cast doesn't lose any information.
5033   Constant *CastedBack =
5034       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5035   if (CastedBack != C)
5036     return nullptr;
5037 
5038   return CastedTo;
5039 }
5040 
5041 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5042                                              Instruction::CastOps *CastOp,
5043                                              unsigned Depth) {
5044   if (Depth >= MaxDepth)
5045     return {SPF_UNKNOWN, SPNB_NA, false};
5046 
5047   SelectInst *SI = dyn_cast<SelectInst>(V);
5048   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5049 
5050   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5051   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5052 
5053   Value *TrueVal = SI->getTrueValue();
5054   Value *FalseVal = SI->getFalseValue();
5055 
5056   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5057                                             CastOp, Depth);
5058 }
5059 
5060 SelectPatternResult llvm::matchDecomposedSelectPattern(
5061     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5062     Instruction::CastOps *CastOp, unsigned Depth) {
5063   CmpInst::Predicate Pred = CmpI->getPredicate();
5064   Value *CmpLHS = CmpI->getOperand(0);
5065   Value *CmpRHS = CmpI->getOperand(1);
5066   FastMathFlags FMF;
5067   if (isa<FPMathOperator>(CmpI))
5068     FMF = CmpI->getFastMathFlags();
5069 
5070   // Bail out early.
5071   if (CmpI->isEquality())
5072     return {SPF_UNKNOWN, SPNB_NA, false};
5073 
5074   // Deal with type mismatches.
5075   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5076     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5077       // If this is a potential fmin/fmax with a cast to integer, then ignore
5078       // -0.0 because there is no corresponding integer value.
5079       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5080         FMF.setNoSignedZeros();
5081       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5082                                   cast<CastInst>(TrueVal)->getOperand(0), C,
5083                                   LHS, RHS, Depth);
5084     }
5085     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5086       // If this is a potential fmin/fmax with a cast to integer, then ignore
5087       // -0.0 because there is no corresponding integer value.
5088       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5089         FMF.setNoSignedZeros();
5090       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5091                                   C, cast<CastInst>(FalseVal)->getOperand(0),
5092                                   LHS, RHS, Depth);
5093     }
5094   }
5095   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5096                               LHS, RHS, Depth);
5097 }
5098 
5099 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5100   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5101   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5102   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5103   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5104   if (SPF == SPF_FMINNUM)
5105     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5106   if (SPF == SPF_FMAXNUM)
5107     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5108   llvm_unreachable("unhandled!");
5109 }
5110 
5111 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5112   if (SPF == SPF_SMIN) return SPF_SMAX;
5113   if (SPF == SPF_UMIN) return SPF_UMAX;
5114   if (SPF == SPF_SMAX) return SPF_SMIN;
5115   if (SPF == SPF_UMAX) return SPF_UMIN;
5116   llvm_unreachable("unhandled!");
5117 }
5118 
5119 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5120   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5121 }
5122 
5123 /// Return true if "icmp Pred LHS RHS" is always true.
5124 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5125                             const Value *RHS, const DataLayout &DL,
5126                             unsigned Depth) {
5127   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5128   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5129     return true;
5130 
5131   switch (Pred) {
5132   default:
5133     return false;
5134 
5135   case CmpInst::ICMP_SLE: {
5136     const APInt *C;
5137 
5138     // LHS s<= LHS +_{nsw} C   if C >= 0
5139     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5140       return !C->isNegative();
5141     return false;
5142   }
5143 
5144   case CmpInst::ICMP_ULE: {
5145     const APInt *C;
5146 
5147     // LHS u<= LHS +_{nuw} C   for any C
5148     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5149       return true;
5150 
5151     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5152     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5153                                        const Value *&X,
5154                                        const APInt *&CA, const APInt *&CB) {
5155       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
5156           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
5157         return true;
5158 
5159       // If X & C == 0 then (X | C) == X +_{nuw} C
5160       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
5161           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
5162         KnownBits Known(CA->getBitWidth());
5163         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
5164                          /*CxtI*/ nullptr, /*DT*/ nullptr);
5165         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
5166           return true;
5167       }
5168 
5169       return false;
5170     };
5171 
5172     const Value *X;
5173     const APInt *CLHS, *CRHS;
5174     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
5175       return CLHS->ule(*CRHS);
5176 
5177     return false;
5178   }
5179   }
5180 }
5181 
5182 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
5183 /// ALHS ARHS" is true.  Otherwise, return None.
5184 static Optional<bool>
5185 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
5186                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
5187                       const DataLayout &DL, unsigned Depth) {
5188   switch (Pred) {
5189   default:
5190     return None;
5191 
5192   case CmpInst::ICMP_SLT:
5193   case CmpInst::ICMP_SLE:
5194     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
5195         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
5196       return true;
5197     return None;
5198 
5199   case CmpInst::ICMP_ULT:
5200   case CmpInst::ICMP_ULE:
5201     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
5202         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
5203       return true;
5204     return None;
5205   }
5206 }
5207 
5208 /// Return true if the operands of the two compares match.  IsSwappedOps is true
5209 /// when the operands match, but are swapped.
5210 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
5211                           const Value *BLHS, const Value *BRHS,
5212                           bool &IsSwappedOps) {
5213 
5214   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
5215   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
5216   return IsMatchingOps || IsSwappedOps;
5217 }
5218 
5219 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
5220 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
5221 /// Otherwise, return None if we can't infer anything.
5222 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
5223                                                     CmpInst::Predicate BPred,
5224                                                     bool AreSwappedOps) {
5225   // Canonicalize the predicate as if the operands were not commuted.
5226   if (AreSwappedOps)
5227     BPred = ICmpInst::getSwappedPredicate(BPred);
5228 
5229   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
5230     return true;
5231   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
5232     return false;
5233 
5234   return None;
5235 }
5236 
5237 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
5238 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
5239 /// Otherwise, return None if we can't infer anything.
5240 static Optional<bool>
5241 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
5242                                  const ConstantInt *C1,
5243                                  CmpInst::Predicate BPred,
5244                                  const ConstantInt *C2) {
5245   ConstantRange DomCR =
5246       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
5247   ConstantRange CR =
5248       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
5249   ConstantRange Intersection = DomCR.intersectWith(CR);
5250   ConstantRange Difference = DomCR.difference(CR);
5251   if (Intersection.isEmptySet())
5252     return false;
5253   if (Difference.isEmptySet())
5254     return true;
5255   return None;
5256 }
5257 
5258 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
5259 /// false.  Otherwise, return None if we can't infer anything.
5260 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
5261                                          const ICmpInst *RHS,
5262                                          const DataLayout &DL, bool LHSIsTrue,
5263                                          unsigned Depth) {
5264   Value *ALHS = LHS->getOperand(0);
5265   Value *ARHS = LHS->getOperand(1);
5266   // The rest of the logic assumes the LHS condition is true.  If that's not the
5267   // case, invert the predicate to make it so.
5268   ICmpInst::Predicate APred =
5269       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
5270 
5271   Value *BLHS = RHS->getOperand(0);
5272   Value *BRHS = RHS->getOperand(1);
5273   ICmpInst::Predicate BPred = RHS->getPredicate();
5274 
5275   // Can we infer anything when the two compares have matching operands?
5276   bool AreSwappedOps;
5277   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
5278     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
5279             APred, BPred, AreSwappedOps))
5280       return Implication;
5281     // No amount of additional analysis will infer the second condition, so
5282     // early exit.
5283     return None;
5284   }
5285 
5286   // Can we infer anything when the LHS operands match and the RHS operands are
5287   // constants (not necessarily matching)?
5288   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
5289     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
5290             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
5291       return Implication;
5292     // No amount of additional analysis will infer the second condition, so
5293     // early exit.
5294     return None;
5295   }
5296 
5297   if (APred == BPred)
5298     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
5299   return None;
5300 }
5301 
5302 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
5303 /// false.  Otherwise, return None if we can't infer anything.  We expect the
5304 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
5305 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
5306                                          const ICmpInst *RHS,
5307                                          const DataLayout &DL, bool LHSIsTrue,
5308                                          unsigned Depth) {
5309   // The LHS must be an 'or' or an 'and' instruction.
5310   assert((LHS->getOpcode() == Instruction::And ||
5311           LHS->getOpcode() == Instruction::Or) &&
5312          "Expected LHS to be 'and' or 'or'.");
5313 
5314   assert(Depth <= MaxDepth && "Hit recursion limit");
5315 
5316   // If the result of an 'or' is false, then we know both legs of the 'or' are
5317   // false.  Similarly, if the result of an 'and' is true, then we know both
5318   // legs of the 'and' are true.
5319   Value *ALHS, *ARHS;
5320   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
5321       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
5322     // FIXME: Make this non-recursion.
5323     if (Optional<bool> Implication =
5324             isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
5325       return Implication;
5326     if (Optional<bool> Implication =
5327             isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
5328       return Implication;
5329     return None;
5330   }
5331   return None;
5332 }
5333 
5334 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
5335                                         const DataLayout &DL, bool LHSIsTrue,
5336                                         unsigned Depth) {
5337   // Bail out when we hit the limit.
5338   if (Depth == MaxDepth)
5339     return None;
5340 
5341   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
5342   // example.
5343   if (LHS->getType() != RHS->getType())
5344     return None;
5345 
5346   Type *OpTy = LHS->getType();
5347   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
5348 
5349   // LHS ==> RHS by definition
5350   if (LHS == RHS)
5351     return LHSIsTrue;
5352 
5353   // FIXME: Extending the code below to handle vectors.
5354   if (OpTy->isVectorTy())
5355     return None;
5356 
5357   assert(OpTy->isIntegerTy(1) && "implied by above");
5358 
5359   // Both LHS and RHS are icmps.
5360   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
5361   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
5362   if (LHSCmp && RHSCmp)
5363     return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
5364 
5365   // The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to be
5366   // an icmp. FIXME: Add support for and/or on the RHS.
5367   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
5368   if (LHSBO && RHSCmp) {
5369     if ((LHSBO->getOpcode() == Instruction::And ||
5370          LHSBO->getOpcode() == Instruction::Or))
5371       return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
5372   }
5373   return None;
5374 }
5375 
5376 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
5377                                              const Instruction *ContextI,
5378                                              const DataLayout &DL) {
5379   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
5380   if (!ContextI || !ContextI->getParent())
5381     return None;
5382 
5383   // TODO: This is a poor/cheap way to determine dominance. Should we use a
5384   // dominator tree (eg, from a SimplifyQuery) instead?
5385   const BasicBlock *ContextBB = ContextI->getParent();
5386   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
5387   if (!PredBB)
5388     return None;
5389 
5390   // We need a conditional branch in the predecessor.
5391   Value *PredCond;
5392   BasicBlock *TrueBB, *FalseBB;
5393   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
5394     return None;
5395 
5396   // The branch should get simplified. Don't bother simplifying this condition.
5397   if (TrueBB == FalseBB)
5398     return None;
5399 
5400   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
5401          "Predecessor block does not point to successor?");
5402 
5403   // Is this condition implied by the predecessor condition?
5404   bool CondIsTrue = TrueBB == ContextBB;
5405   return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
5406 }
5407 
5408 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
5409                               APInt &Upper, const InstrInfoQuery &IIQ) {
5410   unsigned Width = Lower.getBitWidth();
5411   const APInt *C;
5412   switch (BO.getOpcode()) {
5413   case Instruction::Add:
5414     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5415       // FIXME: If we have both nuw and nsw, we should reduce the range further.
5416       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5417         // 'add nuw x, C' produces [C, UINT_MAX].
5418         Lower = *C;
5419       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5420         if (C->isNegative()) {
5421           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
5422           Lower = APInt::getSignedMinValue(Width);
5423           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5424         } else {
5425           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
5426           Lower = APInt::getSignedMinValue(Width) + *C;
5427           Upper = APInt::getSignedMaxValue(Width) + 1;
5428         }
5429       }
5430     }
5431     break;
5432 
5433   case Instruction::And:
5434     if (match(BO.getOperand(1), m_APInt(C)))
5435       // 'and x, C' produces [0, C].
5436       Upper = *C + 1;
5437     break;
5438 
5439   case Instruction::Or:
5440     if (match(BO.getOperand(1), m_APInt(C)))
5441       // 'or x, C' produces [C, UINT_MAX].
5442       Lower = *C;
5443     break;
5444 
5445   case Instruction::AShr:
5446     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5447       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
5448       Lower = APInt::getSignedMinValue(Width).ashr(*C);
5449       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
5450     } else if (match(BO.getOperand(0), m_APInt(C))) {
5451       unsigned ShiftAmount = Width - 1;
5452       if (!C->isNullValue() && IIQ.isExact(&BO))
5453         ShiftAmount = C->countTrailingZeros();
5454       if (C->isNegative()) {
5455         // 'ashr C, x' produces [C, C >> (Width-1)]
5456         Lower = *C;
5457         Upper = C->ashr(ShiftAmount) + 1;
5458       } else {
5459         // 'ashr C, x' produces [C >> (Width-1), C]
5460         Lower = C->ashr(ShiftAmount);
5461         Upper = *C + 1;
5462       }
5463     }
5464     break;
5465 
5466   case Instruction::LShr:
5467     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5468       // 'lshr x, C' produces [0, UINT_MAX >> C].
5469       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
5470     } else if (match(BO.getOperand(0), m_APInt(C))) {
5471       // 'lshr C, x' produces [C >> (Width-1), C].
5472       unsigned ShiftAmount = Width - 1;
5473       if (!C->isNullValue() && IIQ.isExact(&BO))
5474         ShiftAmount = C->countTrailingZeros();
5475       Lower = C->lshr(ShiftAmount);
5476       Upper = *C + 1;
5477     }
5478     break;
5479 
5480   case Instruction::Shl:
5481     if (match(BO.getOperand(0), m_APInt(C))) {
5482       if (IIQ.hasNoUnsignedWrap(&BO)) {
5483         // 'shl nuw C, x' produces [C, C << CLZ(C)]
5484         Lower = *C;
5485         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
5486       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
5487         if (C->isNegative()) {
5488           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
5489           unsigned ShiftAmount = C->countLeadingOnes() - 1;
5490           Lower = C->shl(ShiftAmount);
5491           Upper = *C + 1;
5492         } else {
5493           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
5494           unsigned ShiftAmount = C->countLeadingZeros() - 1;
5495           Lower = *C;
5496           Upper = C->shl(ShiftAmount) + 1;
5497         }
5498       }
5499     }
5500     break;
5501 
5502   case Instruction::SDiv:
5503     if (match(BO.getOperand(1), m_APInt(C))) {
5504       APInt IntMin = APInt::getSignedMinValue(Width);
5505       APInt IntMax = APInt::getSignedMaxValue(Width);
5506       if (C->isAllOnesValue()) {
5507         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
5508         //    where C != -1 and C != 0 and C != 1
5509         Lower = IntMin + 1;
5510         Upper = IntMax + 1;
5511       } else if (C->countLeadingZeros() < Width - 1) {
5512         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
5513         //    where C != -1 and C != 0 and C != 1
5514         Lower = IntMin.sdiv(*C);
5515         Upper = IntMax.sdiv(*C);
5516         if (Lower.sgt(Upper))
5517           std::swap(Lower, Upper);
5518         Upper = Upper + 1;
5519         assert(Upper != Lower && "Upper part of range has wrapped!");
5520       }
5521     } else if (match(BO.getOperand(0), m_APInt(C))) {
5522       if (C->isMinSignedValue()) {
5523         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
5524         Lower = *C;
5525         Upper = Lower.lshr(1) + 1;
5526       } else {
5527         // 'sdiv C, x' produces [-|C|, |C|].
5528         Upper = C->abs() + 1;
5529         Lower = (-Upper) + 1;
5530       }
5531     }
5532     break;
5533 
5534   case Instruction::UDiv:
5535     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5536       // 'udiv x, C' produces [0, UINT_MAX / C].
5537       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
5538     } else if (match(BO.getOperand(0), m_APInt(C))) {
5539       // 'udiv C, x' produces [0, C].
5540       Upper = *C + 1;
5541     }
5542     break;
5543 
5544   case Instruction::SRem:
5545     if (match(BO.getOperand(1), m_APInt(C))) {
5546       // 'srem x, C' produces (-|C|, |C|).
5547       Upper = C->abs();
5548       Lower = (-Upper) + 1;
5549     }
5550     break;
5551 
5552   case Instruction::URem:
5553     if (match(BO.getOperand(1), m_APInt(C)))
5554       // 'urem x, C' produces [0, C).
5555       Upper = *C;
5556     break;
5557 
5558   default:
5559     break;
5560   }
5561 }
5562 
5563 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
5564                                   APInt &Upper) {
5565   unsigned Width = Lower.getBitWidth();
5566   const APInt *C;
5567   switch (II.getIntrinsicID()) {
5568   case Intrinsic::uadd_sat:
5569     // uadd.sat(x, C) produces [C, UINT_MAX].
5570     if (match(II.getOperand(0), m_APInt(C)) ||
5571         match(II.getOperand(1), m_APInt(C)))
5572       Lower = *C;
5573     break;
5574   case Intrinsic::sadd_sat:
5575     if (match(II.getOperand(0), m_APInt(C)) ||
5576         match(II.getOperand(1), m_APInt(C))) {
5577       if (C->isNegative()) {
5578         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
5579         Lower = APInt::getSignedMinValue(Width);
5580         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5581       } else {
5582         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
5583         Lower = APInt::getSignedMinValue(Width) + *C;
5584         Upper = APInt::getSignedMaxValue(Width) + 1;
5585       }
5586     }
5587     break;
5588   case Intrinsic::usub_sat:
5589     // usub.sat(C, x) produces [0, C].
5590     if (match(II.getOperand(0), m_APInt(C)))
5591       Upper = *C + 1;
5592     // usub.sat(x, C) produces [0, UINT_MAX - C].
5593     else if (match(II.getOperand(1), m_APInt(C)))
5594       Upper = APInt::getMaxValue(Width) - *C + 1;
5595     break;
5596   case Intrinsic::ssub_sat:
5597     if (match(II.getOperand(0), m_APInt(C))) {
5598       if (C->isNegative()) {
5599         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
5600         Lower = APInt::getSignedMinValue(Width);
5601         Upper = *C - APInt::getSignedMinValue(Width) + 1;
5602       } else {
5603         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
5604         Lower = *C - APInt::getSignedMaxValue(Width);
5605         Upper = APInt::getSignedMaxValue(Width) + 1;
5606       }
5607     } else if (match(II.getOperand(1), m_APInt(C))) {
5608       if (C->isNegative()) {
5609         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
5610         Lower = APInt::getSignedMinValue(Width) - *C;
5611         Upper = APInt::getSignedMaxValue(Width) + 1;
5612       } else {
5613         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
5614         Lower = APInt::getSignedMinValue(Width);
5615         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
5616       }
5617     }
5618     break;
5619   default:
5620     break;
5621   }
5622 }
5623 
5624 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
5625                                       APInt &Upper) {
5626   const Value *LHS, *RHS;
5627   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
5628   if (R.Flavor == SPF_UNKNOWN)
5629     return;
5630 
5631   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
5632 
5633   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
5634     // If the negation part of the abs (in RHS) has the NSW flag,
5635     // then the result of abs(X) is [0..SIGNED_MAX],
5636     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
5637     Lower = APInt::getNullValue(BitWidth);
5638     if (cast<Instruction>(RHS)->hasNoSignedWrap())
5639       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5640     else
5641       Upper = APInt::getSignedMinValue(BitWidth) + 1;
5642     return;
5643   }
5644 
5645   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
5646     // The result of -abs(X) is <= 0.
5647     Lower = APInt::getSignedMinValue(BitWidth);
5648     Upper = APInt(BitWidth, 1);
5649     return;
5650   }
5651 
5652   const APInt *C;
5653   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
5654     return;
5655 
5656   switch (R.Flavor) {
5657     case SPF_UMIN:
5658       Upper = *C + 1;
5659       break;
5660     case SPF_UMAX:
5661       Lower = *C;
5662       break;
5663     case SPF_SMIN:
5664       Lower = APInt::getSignedMinValue(BitWidth);
5665       Upper = *C + 1;
5666       break;
5667     case SPF_SMAX:
5668       Lower = *C;
5669       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5670       break;
5671     default:
5672       break;
5673   }
5674 }
5675 
5676 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
5677   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
5678 
5679   const APInt *C;
5680   if (match(V, m_APInt(C)))
5681     return ConstantRange(*C);
5682 
5683   InstrInfoQuery IIQ(UseInstrInfo);
5684   unsigned BitWidth = V->getType()->getScalarSizeInBits();
5685   APInt Lower = APInt(BitWidth, 0);
5686   APInt Upper = APInt(BitWidth, 0);
5687   if (auto *BO = dyn_cast<BinaryOperator>(V))
5688     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
5689   else if (auto *II = dyn_cast<IntrinsicInst>(V))
5690     setLimitsForIntrinsic(*II, Lower, Upper);
5691   else if (auto *SI = dyn_cast<SelectInst>(V))
5692     setLimitsForSelectPattern(*SI, Lower, Upper);
5693 
5694   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
5695 
5696   if (auto *I = dyn_cast<Instruction>(V))
5697     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
5698       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
5699 
5700   return CR;
5701 }
5702