1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/StringRef.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/Loads.h"
31 #include "llvm/Analysis/LoopInfo.h"
32 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
33 #include "llvm/Analysis/TargetLibraryInfo.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/DiagnosticInfo.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GetElementPtrTypeIterator.h"
47 #include "llvm/IR/GlobalAlias.h"
48 #include "llvm/IR/GlobalValue.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/LLVMContext.h"
56 #include "llvm/IR/Metadata.h"
57 #include "llvm/IR/Module.h"
58 #include "llvm/IR/Operator.h"
59 #include "llvm/IR/PatternMatch.h"
60 #include "llvm/IR/Type.h"
61 #include "llvm/IR/User.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/Support/Casting.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/Compiler.h"
66 #include "llvm/Support/ErrorHandling.h"
67 #include "llvm/Support/KnownBits.h"
68 #include "llvm/Support/MathExtras.h"
69 #include <algorithm>
70 #include <array>
71 #include <cassert>
72 #include <cstdint>
73 #include <iterator>
74 #include <utility>
75 
76 using namespace llvm;
77 using namespace llvm::PatternMatch;
78 
79 const unsigned MaxDepth = 6;
80 
81 // Controls the number of uses of the value searched for possible
82 // dominating comparisons.
83 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
84                                               cl::Hidden, cl::init(20));
85 
86 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
87 /// returns the element type's bitwidth.
88 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
89   if (unsigned BitWidth = Ty->getScalarSizeInBits())
90     return BitWidth;
91 
92   return DL.getIndexTypeSizeInBits(Ty);
93 }
94 
95 namespace {
96 
97 // Simplifying using an assume can only be done in a particular control-flow
98 // context (the context instruction provides that context). If an assume and
99 // the context instruction are not in the same block then the DT helps in
100 // figuring out if we can use it.
101 struct Query {
102   const DataLayout &DL;
103   AssumptionCache *AC;
104   const Instruction *CxtI;
105   const DominatorTree *DT;
106 
107   // Unlike the other analyses, this may be a nullptr because not all clients
108   // provide it currently.
109   OptimizationRemarkEmitter *ORE;
110 
111   /// Set of assumptions that should be excluded from further queries.
112   /// This is because of the potential for mutual recursion to cause
113   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
114   /// classic case of this is assume(x = y), which will attempt to determine
115   /// bits in x from bits in y, which will attempt to determine bits in y from
116   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
117   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
118   /// (all of which can call computeKnownBits), and so on.
119   std::array<const Value *, MaxDepth> Excluded;
120 
121   /// If true, it is safe to use metadata during simplification.
122   InstrInfoQuery IIQ;
123 
124   unsigned NumExcluded = 0;
125 
126   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
127         const DominatorTree *DT, bool UseInstrInfo,
128         OptimizationRemarkEmitter *ORE = nullptr)
129       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
130 
131   Query(const Query &Q, const Value *NewExcl)
132       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
133         NumExcluded(Q.NumExcluded) {
134     Excluded = Q.Excluded;
135     Excluded[NumExcluded++] = NewExcl;
136     assert(NumExcluded <= Excluded.size());
137   }
138 
139   bool isExcluded(const Value *Value) const {
140     if (NumExcluded == 0)
141       return false;
142     auto End = Excluded.begin() + NumExcluded;
143     return std::find(Excluded.begin(), End, Value) != End;
144   }
145 };
146 
147 } // end anonymous namespace
148 
149 // Given the provided Value and, potentially, a context instruction, return
150 // the preferred context instruction (if any).
151 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
152   // If we've been provided with a context instruction, then use that (provided
153   // it has been inserted).
154   if (CxtI && CxtI->getParent())
155     return CxtI;
156 
157   // If the value is really an already-inserted instruction, then use that.
158   CxtI = dyn_cast<Instruction>(V);
159   if (CxtI && CxtI->getParent())
160     return CxtI;
161 
162   return nullptr;
163 }
164 
165 static void computeKnownBits(const Value *V, KnownBits &Known,
166                              unsigned Depth, const Query &Q);
167 
168 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
169                             const DataLayout &DL, unsigned Depth,
170                             AssumptionCache *AC, const Instruction *CxtI,
171                             const DominatorTree *DT,
172                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
173   ::computeKnownBits(V, Known, Depth,
174                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
175 }
176 
177 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
178                                   const Query &Q);
179 
180 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
181                                  unsigned Depth, AssumptionCache *AC,
182                                  const Instruction *CxtI,
183                                  const DominatorTree *DT,
184                                  OptimizationRemarkEmitter *ORE,
185                                  bool UseInstrInfo) {
186   return ::computeKnownBits(
187       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
188 }
189 
190 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
191                                const DataLayout &DL, AssumptionCache *AC,
192                                const Instruction *CxtI, const DominatorTree *DT,
193                                bool UseInstrInfo) {
194   assert(LHS->getType() == RHS->getType() &&
195          "LHS and RHS should have the same type");
196   assert(LHS->getType()->isIntOrIntVectorTy() &&
197          "LHS and RHS should be integers");
198   // Look for an inverted mask: (X & ~M) op (Y & M).
199   Value *M;
200   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
201       match(RHS, m_c_And(m_Specific(M), m_Value())))
202     return true;
203   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
204       match(LHS, m_c_And(m_Specific(M), m_Value())))
205     return true;
206   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
207   KnownBits LHSKnown(IT->getBitWidth());
208   KnownBits RHSKnown(IT->getBitWidth());
209   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
210   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
211   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
212 }
213 
214 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
215   for (const User *U : CxtI->users()) {
216     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
217       if (IC->isEquality())
218         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
219           if (C->isNullValue())
220             continue;
221     return false;
222   }
223   return true;
224 }
225 
226 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
227                                    const Query &Q);
228 
229 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
230                                   bool OrZero, unsigned Depth,
231                                   AssumptionCache *AC, const Instruction *CxtI,
232                                   const DominatorTree *DT, bool UseInstrInfo) {
233   return ::isKnownToBeAPowerOfTwo(
234       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
235 }
236 
237 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
238 
239 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
240                           AssumptionCache *AC, const Instruction *CxtI,
241                           const DominatorTree *DT, bool UseInstrInfo) {
242   return ::isKnownNonZero(V, Depth,
243                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
244 }
245 
246 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
247                               unsigned Depth, AssumptionCache *AC,
248                               const Instruction *CxtI, const DominatorTree *DT,
249                               bool UseInstrInfo) {
250   KnownBits Known =
251       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
252   return Known.isNonNegative();
253 }
254 
255 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
256                            AssumptionCache *AC, const Instruction *CxtI,
257                            const DominatorTree *DT, bool UseInstrInfo) {
258   if (auto *CI = dyn_cast<ConstantInt>(V))
259     return CI->getValue().isStrictlyPositive();
260 
261   // TODO: We'd doing two recursive queries here.  We should factor this such
262   // that only a single query is needed.
263   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
264          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
265 }
266 
267 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
268                            AssumptionCache *AC, const Instruction *CxtI,
269                            const DominatorTree *DT, bool UseInstrInfo) {
270   KnownBits Known =
271       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
272   return Known.isNegative();
273 }
274 
275 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
276 
277 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
278                            const DataLayout &DL, AssumptionCache *AC,
279                            const Instruction *CxtI, const DominatorTree *DT,
280                            bool UseInstrInfo) {
281   return ::isKnownNonEqual(V1, V2,
282                            Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
283                                  UseInstrInfo, /*ORE=*/nullptr));
284 }
285 
286 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
287                               const Query &Q);
288 
289 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
290                              const DataLayout &DL, unsigned Depth,
291                              AssumptionCache *AC, const Instruction *CxtI,
292                              const DominatorTree *DT, bool UseInstrInfo) {
293   return ::MaskedValueIsZero(
294       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
295 }
296 
297 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
298                                    const Query &Q);
299 
300 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
301                                   unsigned Depth, AssumptionCache *AC,
302                                   const Instruction *CxtI,
303                                   const DominatorTree *DT, bool UseInstrInfo) {
304   return ::ComputeNumSignBits(
305       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
306 }
307 
308 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
309                                    bool NSW,
310                                    KnownBits &KnownOut, KnownBits &Known2,
311                                    unsigned Depth, const Query &Q) {
312   unsigned BitWidth = KnownOut.getBitWidth();
313 
314   // If an initial sequence of bits in the result is not needed, the
315   // corresponding bits in the operands are not needed.
316   KnownBits LHSKnown(BitWidth);
317   computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
318   computeKnownBits(Op1, Known2, Depth + 1, Q);
319 
320   KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
321 }
322 
323 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
324                                 KnownBits &Known, KnownBits &Known2,
325                                 unsigned Depth, const Query &Q) {
326   unsigned BitWidth = Known.getBitWidth();
327   computeKnownBits(Op1, Known, Depth + 1, Q);
328   computeKnownBits(Op0, Known2, Depth + 1, Q);
329 
330   bool isKnownNegative = false;
331   bool isKnownNonNegative = false;
332   // If the multiplication is known not to overflow, compute the sign bit.
333   if (NSW) {
334     if (Op0 == Op1) {
335       // The product of a number with itself is non-negative.
336       isKnownNonNegative = true;
337     } else {
338       bool isKnownNonNegativeOp1 = Known.isNonNegative();
339       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
340       bool isKnownNegativeOp1 = Known.isNegative();
341       bool isKnownNegativeOp0 = Known2.isNegative();
342       // The product of two numbers with the same sign is non-negative.
343       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
344         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
345       // The product of a negative number and a non-negative number is either
346       // negative or zero.
347       if (!isKnownNonNegative)
348         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
349                            isKnownNonZero(Op0, Depth, Q)) ||
350                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
351                            isKnownNonZero(Op1, Depth, Q));
352     }
353   }
354 
355   assert(!Known.hasConflict() && !Known2.hasConflict());
356   // Compute a conservative estimate for high known-0 bits.
357   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
358                              Known2.countMinLeadingZeros(),
359                              BitWidth) - BitWidth;
360   LeadZ = std::min(LeadZ, BitWidth);
361 
362   // The result of the bottom bits of an integer multiply can be
363   // inferred by looking at the bottom bits of both operands and
364   // multiplying them together.
365   // We can infer at least the minimum number of known trailing bits
366   // of both operands. Depending on number of trailing zeros, we can
367   // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
368   // a and b are divisible by m and n respectively.
369   // We then calculate how many of those bits are inferrable and set
370   // the output. For example, the i8 mul:
371   //  a = XXXX1100 (12)
372   //  b = XXXX1110 (14)
373   // We know the bottom 3 bits are zero since the first can be divided by
374   // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
375   // Applying the multiplication to the trimmed arguments gets:
376   //    XX11 (3)
377   //    X111 (7)
378   // -------
379   //    XX11
380   //   XX11
381   //  XX11
382   // XX11
383   // -------
384   // XXXXX01
385   // Which allows us to infer the 2 LSBs. Since we're multiplying the result
386   // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
387   // The proof for this can be described as:
388   // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
389   //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
390   //                    umin(countTrailingZeros(C2), C6) +
391   //                    umin(C5 - umin(countTrailingZeros(C1), C5),
392   //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
393   // %aa = shl i8 %a, C5
394   // %bb = shl i8 %b, C6
395   // %aaa = or i8 %aa, C1
396   // %bbb = or i8 %bb, C2
397   // %mul = mul i8 %aaa, %bbb
398   // %mask = and i8 %mul, C7
399   //   =>
400   // %mask = i8 ((C1*C2)&C7)
401   // Where C5, C6 describe the known bits of %a, %b
402   // C1, C2 describe the known bottom bits of %a, %b.
403   // C7 describes the mask of the known bits of the result.
404   APInt Bottom0 = Known.One;
405   APInt Bottom1 = Known2.One;
406 
407   // How many times we'd be able to divide each argument by 2 (shr by 1).
408   // This gives us the number of trailing zeros on the multiplication result.
409   unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
410   unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
411   unsigned TrailZero0 = Known.countMinTrailingZeros();
412   unsigned TrailZero1 = Known2.countMinTrailingZeros();
413   unsigned TrailZ = TrailZero0 + TrailZero1;
414 
415   // Figure out the fewest known-bits operand.
416   unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
417                                       TrailBitsKnown1 - TrailZero1);
418   unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
419 
420   APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
421                       Bottom1.getLoBits(TrailBitsKnown1);
422 
423   Known.resetAll();
424   Known.Zero.setHighBits(LeadZ);
425   Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
426   Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
427 
428   // Only make use of no-wrap flags if we failed to compute the sign bit
429   // directly.  This matters if the multiplication always overflows, in
430   // which case we prefer to follow the result of the direct computation,
431   // though as the program is invoking undefined behaviour we can choose
432   // whatever we like here.
433   if (isKnownNonNegative && !Known.isNegative())
434     Known.makeNonNegative();
435   else if (isKnownNegative && !Known.isNonNegative())
436     Known.makeNegative();
437 }
438 
439 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
440                                              KnownBits &Known) {
441   unsigned BitWidth = Known.getBitWidth();
442   unsigned NumRanges = Ranges.getNumOperands() / 2;
443   assert(NumRanges >= 1);
444 
445   Known.Zero.setAllBits();
446   Known.One.setAllBits();
447 
448   for (unsigned i = 0; i < NumRanges; ++i) {
449     ConstantInt *Lower =
450         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
451     ConstantInt *Upper =
452         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
453     ConstantRange Range(Lower->getValue(), Upper->getValue());
454 
455     // The first CommonPrefixBits of all values in Range are equal.
456     unsigned CommonPrefixBits =
457         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
458 
459     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
460     Known.One &= Range.getUnsignedMax() & Mask;
461     Known.Zero &= ~Range.getUnsignedMax() & Mask;
462   }
463 }
464 
465 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
466   SmallVector<const Value *, 16> WorkSet(1, I);
467   SmallPtrSet<const Value *, 32> Visited;
468   SmallPtrSet<const Value *, 16> EphValues;
469 
470   // The instruction defining an assumption's condition itself is always
471   // considered ephemeral to that assumption (even if it has other
472   // non-ephemeral users). See r246696's test case for an example.
473   if (is_contained(I->operands(), E))
474     return true;
475 
476   while (!WorkSet.empty()) {
477     const Value *V = WorkSet.pop_back_val();
478     if (!Visited.insert(V).second)
479       continue;
480 
481     // If all uses of this value are ephemeral, then so is this value.
482     if (llvm::all_of(V->users(), [&](const User *U) {
483                                    return EphValues.count(U);
484                                  })) {
485       if (V == E)
486         return true;
487 
488       if (V == I || isSafeToSpeculativelyExecute(V)) {
489        EphValues.insert(V);
490        if (const User *U = dyn_cast<User>(V))
491          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
492               J != JE; ++J)
493            WorkSet.push_back(*J);
494       }
495     }
496   }
497 
498   return false;
499 }
500 
501 // Is this an intrinsic that cannot be speculated but also cannot trap?
502 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
503   if (const CallInst *CI = dyn_cast<CallInst>(I))
504     if (Function *F = CI->getCalledFunction())
505       switch (F->getIntrinsicID()) {
506       default: break;
507       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
508       case Intrinsic::assume:
509       case Intrinsic::sideeffect:
510       case Intrinsic::dbg_declare:
511       case Intrinsic::dbg_value:
512       case Intrinsic::dbg_label:
513       case Intrinsic::invariant_start:
514       case Intrinsic::invariant_end:
515       case Intrinsic::lifetime_start:
516       case Intrinsic::lifetime_end:
517       case Intrinsic::objectsize:
518       case Intrinsic::ptr_annotation:
519       case Intrinsic::var_annotation:
520         return true;
521       }
522 
523   return false;
524 }
525 
526 bool llvm::isValidAssumeForContext(const Instruction *Inv,
527                                    const Instruction *CxtI,
528                                    const DominatorTree *DT) {
529   // There are two restrictions on the use of an assume:
530   //  1. The assume must dominate the context (or the control flow must
531   //     reach the assume whenever it reaches the context).
532   //  2. The context must not be in the assume's set of ephemeral values
533   //     (otherwise we will use the assume to prove that the condition
534   //     feeding the assume is trivially true, thus causing the removal of
535   //     the assume).
536 
537   if (DT) {
538     if (DT->dominates(Inv, CxtI))
539       return true;
540   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
541     // We don't have a DT, but this trivially dominates.
542     return true;
543   }
544 
545   // With or without a DT, the only remaining case we will check is if the
546   // instructions are in the same BB.  Give up if that is not the case.
547   if (Inv->getParent() != CxtI->getParent())
548     return false;
549 
550   // If we have a dom tree, then we now know that the assume doesn't dominate
551   // the other instruction.  If we don't have a dom tree then we can check if
552   // the assume is first in the BB.
553   if (!DT) {
554     // Search forward from the assume until we reach the context (or the end
555     // of the block); the common case is that the assume will come first.
556     for (auto I = std::next(BasicBlock::const_iterator(Inv)),
557          IE = Inv->getParent()->end(); I != IE; ++I)
558       if (&*I == CxtI)
559         return true;
560   }
561 
562   // The context comes first, but they're both in the same block. Make sure
563   // there is nothing in between that might interrupt the control flow.
564   for (BasicBlock::const_iterator I =
565          std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
566        I != IE; ++I)
567     if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
568       return false;
569 
570   return !isEphemeralValueOf(Inv, CxtI);
571 }
572 
573 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
574                                        unsigned Depth, const Query &Q) {
575   // Use of assumptions is context-sensitive. If we don't have a context, we
576   // cannot use them!
577   if (!Q.AC || !Q.CxtI)
578     return;
579 
580   unsigned BitWidth = Known.getBitWidth();
581 
582   // Note that the patterns below need to be kept in sync with the code
583   // in AssumptionCache::updateAffectedValues.
584 
585   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
586     if (!AssumeVH)
587       continue;
588     CallInst *I = cast<CallInst>(AssumeVH);
589     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
590            "Got assumption for the wrong function!");
591     if (Q.isExcluded(I))
592       continue;
593 
594     // Warning: This loop can end up being somewhat performance sensitive.
595     // We're running this loop for once for each value queried resulting in a
596     // runtime of ~O(#assumes * #values).
597 
598     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
599            "must be an assume intrinsic");
600 
601     Value *Arg = I->getArgOperand(0);
602 
603     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
604       assert(BitWidth == 1 && "assume operand is not i1?");
605       Known.setAllOnes();
606       return;
607     }
608     if (match(Arg, m_Not(m_Specific(V))) &&
609         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
610       assert(BitWidth == 1 && "assume operand is not i1?");
611       Known.setAllZero();
612       return;
613     }
614 
615     // The remaining tests are all recursive, so bail out if we hit the limit.
616     if (Depth == MaxDepth)
617       continue;
618 
619     Value *A, *B;
620     auto m_V = m_CombineOr(m_Specific(V),
621                            m_CombineOr(m_PtrToInt(m_Specific(V)),
622                            m_BitCast(m_Specific(V))));
623 
624     CmpInst::Predicate Pred;
625     uint64_t C;
626     // assume(v = a)
627     if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
628         Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
629       KnownBits RHSKnown(BitWidth);
630       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
631       Known.Zero |= RHSKnown.Zero;
632       Known.One  |= RHSKnown.One;
633     // assume(v & b = a)
634     } else if (match(Arg,
635                      m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
636                Pred == ICmpInst::ICMP_EQ &&
637                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
638       KnownBits RHSKnown(BitWidth);
639       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
640       KnownBits MaskKnown(BitWidth);
641       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
642 
643       // For those bits in the mask that are known to be one, we can propagate
644       // known bits from the RHS to V.
645       Known.Zero |= RHSKnown.Zero & MaskKnown.One;
646       Known.One  |= RHSKnown.One  & MaskKnown.One;
647     // assume(~(v & b) = a)
648     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
649                                    m_Value(A))) &&
650                Pred == ICmpInst::ICMP_EQ &&
651                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
652       KnownBits RHSKnown(BitWidth);
653       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
654       KnownBits MaskKnown(BitWidth);
655       computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));
656 
657       // For those bits in the mask that are known to be one, we can propagate
658       // inverted known bits from the RHS to V.
659       Known.Zero |= RHSKnown.One  & MaskKnown.One;
660       Known.One  |= RHSKnown.Zero & MaskKnown.One;
661     // assume(v | b = a)
662     } else if (match(Arg,
663                      m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
664                Pred == ICmpInst::ICMP_EQ &&
665                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
666       KnownBits RHSKnown(BitWidth);
667       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
668       KnownBits BKnown(BitWidth);
669       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
670 
671       // For those bits in B that are known to be zero, we can propagate known
672       // bits from the RHS to V.
673       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
674       Known.One  |= RHSKnown.One  & BKnown.Zero;
675     // assume(~(v | b) = a)
676     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
677                                    m_Value(A))) &&
678                Pred == ICmpInst::ICMP_EQ &&
679                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
680       KnownBits RHSKnown(BitWidth);
681       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
682       KnownBits BKnown(BitWidth);
683       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
684 
685       // For those bits in B that are known to be zero, we can propagate
686       // inverted known bits from the RHS to V.
687       Known.Zero |= RHSKnown.One  & BKnown.Zero;
688       Known.One  |= RHSKnown.Zero & BKnown.Zero;
689     // assume(v ^ b = a)
690     } else if (match(Arg,
691                      m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
692                Pred == ICmpInst::ICMP_EQ &&
693                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
694       KnownBits RHSKnown(BitWidth);
695       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
696       KnownBits BKnown(BitWidth);
697       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
698 
699       // For those bits in B that are known to be zero, we can propagate known
700       // bits from the RHS to V. For those bits in B that are known to be one,
701       // we can propagate inverted known bits from the RHS to V.
702       Known.Zero |= RHSKnown.Zero & BKnown.Zero;
703       Known.One  |= RHSKnown.One  & BKnown.Zero;
704       Known.Zero |= RHSKnown.One  & BKnown.One;
705       Known.One  |= RHSKnown.Zero & BKnown.One;
706     // assume(~(v ^ b) = a)
707     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
708                                    m_Value(A))) &&
709                Pred == ICmpInst::ICMP_EQ &&
710                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
711       KnownBits RHSKnown(BitWidth);
712       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
713       KnownBits BKnown(BitWidth);
714       computeKnownBits(B, BKnown, Depth+1, Query(Q, I));
715 
716       // For those bits in B that are known to be zero, we can propagate
717       // inverted known bits from the RHS to V. For those bits in B that are
718       // known to be one, we can propagate known bits from the RHS to V.
719       Known.Zero |= RHSKnown.One  & BKnown.Zero;
720       Known.One  |= RHSKnown.Zero & BKnown.Zero;
721       Known.Zero |= RHSKnown.Zero & BKnown.One;
722       Known.One  |= RHSKnown.One  & BKnown.One;
723     // assume(v << c = a)
724     } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
725                                    m_Value(A))) &&
726                Pred == ICmpInst::ICMP_EQ &&
727                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
728                C < BitWidth) {
729       KnownBits RHSKnown(BitWidth);
730       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
731       // For those bits in RHS that are known, we can propagate them to known
732       // bits in V shifted to the right by C.
733       RHSKnown.Zero.lshrInPlace(C);
734       Known.Zero |= RHSKnown.Zero;
735       RHSKnown.One.lshrInPlace(C);
736       Known.One  |= RHSKnown.One;
737     // assume(~(v << c) = a)
738     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
739                                    m_Value(A))) &&
740                Pred == ICmpInst::ICMP_EQ &&
741                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
742                C < BitWidth) {
743       KnownBits RHSKnown(BitWidth);
744       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
745       // For those bits in RHS that are known, we can propagate them inverted
746       // to known bits in V shifted to the right by C.
747       RHSKnown.One.lshrInPlace(C);
748       Known.Zero |= RHSKnown.One;
749       RHSKnown.Zero.lshrInPlace(C);
750       Known.One  |= RHSKnown.Zero;
751     // assume(v >> c = a)
752     } else if (match(Arg,
753                      m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
754                               m_Value(A))) &&
755                Pred == ICmpInst::ICMP_EQ &&
756                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
757                C < BitWidth) {
758       KnownBits RHSKnown(BitWidth);
759       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
760       // For those bits in RHS that are known, we can propagate them to known
761       // bits in V shifted to the right by C.
762       Known.Zero |= RHSKnown.Zero << C;
763       Known.One  |= RHSKnown.One  << C;
764     // assume(~(v >> c) = a)
765     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
766                                    m_Value(A))) &&
767                Pred == ICmpInst::ICMP_EQ &&
768                isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
769                C < BitWidth) {
770       KnownBits RHSKnown(BitWidth);
771       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
772       // For those bits in RHS that are known, we can propagate them inverted
773       // to known bits in V shifted to the right by C.
774       Known.Zero |= RHSKnown.One  << C;
775       Known.One  |= RHSKnown.Zero << C;
776     // assume(v >=_s c) where c is non-negative
777     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
778                Pred == ICmpInst::ICMP_SGE &&
779                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
780       KnownBits RHSKnown(BitWidth);
781       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
782 
783       if (RHSKnown.isNonNegative()) {
784         // We know that the sign bit is zero.
785         Known.makeNonNegative();
786       }
787     // assume(v >_s c) where c is at least -1.
788     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
789                Pred == ICmpInst::ICMP_SGT &&
790                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
791       KnownBits RHSKnown(BitWidth);
792       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
793 
794       if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
795         // We know that the sign bit is zero.
796         Known.makeNonNegative();
797       }
798     // assume(v <=_s c) where c is negative
799     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
800                Pred == ICmpInst::ICMP_SLE &&
801                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
802       KnownBits RHSKnown(BitWidth);
803       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
804 
805       if (RHSKnown.isNegative()) {
806         // We know that the sign bit is one.
807         Known.makeNegative();
808       }
809     // assume(v <_s c) where c is non-positive
810     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
811                Pred == ICmpInst::ICMP_SLT &&
812                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
813       KnownBits RHSKnown(BitWidth);
814       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
815 
816       if (RHSKnown.isZero() || RHSKnown.isNegative()) {
817         // We know that the sign bit is one.
818         Known.makeNegative();
819       }
820     // assume(v <=_u c)
821     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
822                Pred == ICmpInst::ICMP_ULE &&
823                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
824       KnownBits RHSKnown(BitWidth);
825       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
826 
827       // Whatever high bits in c are zero are known to be zero.
828       Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
829       // assume(v <_u c)
830     } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
831                Pred == ICmpInst::ICMP_ULT &&
832                isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
833       KnownBits RHSKnown(BitWidth);
834       computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
835 
836       // If the RHS is known zero, then this assumption must be wrong (nothing
837       // is unsigned less than zero). Signal a conflict and get out of here.
838       if (RHSKnown.isZero()) {
839         Known.Zero.setAllBits();
840         Known.One.setAllBits();
841         break;
842       }
843 
844       // Whatever high bits in c are zero are known to be zero (if c is a power
845       // of 2, then one more).
846       if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
847         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
848       else
849         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
850     }
851   }
852 
853   // If assumptions conflict with each other or previous known bits, then we
854   // have a logical fallacy. It's possible that the assumption is not reachable,
855   // so this isn't a real bug. On the other hand, the program may have undefined
856   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
857   // clear out the known bits, try to warn the user, and hope for the best.
858   if (Known.Zero.intersects(Known.One)) {
859     Known.resetAll();
860 
861     if (Q.ORE)
862       Q.ORE->emit([&]() {
863         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
864         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
865                                           CxtI)
866                << "Detected conflicting code assumptions. Program may "
867                   "have undefined behavior, or compiler may have "
868                   "internal error.";
869       });
870   }
871 }
872 
873 /// Compute known bits from a shift operator, including those with a
874 /// non-constant shift amount. Known is the output of this function. Known2 is a
875 /// pre-allocated temporary with the same bit width as Known. KZF and KOF are
876 /// operator-specific functions that, given the known-zero or known-one bits
877 /// respectively, and a shift amount, compute the implied known-zero or
878 /// known-one bits of the shift operator's result respectively for that shift
879 /// amount. The results from calling KZF and KOF are conservatively combined for
880 /// all permitted shift amounts.
881 static void computeKnownBitsFromShiftOperator(
882     const Operator *I, KnownBits &Known, KnownBits &Known2,
883     unsigned Depth, const Query &Q,
884     function_ref<APInt(const APInt &, unsigned)> KZF,
885     function_ref<APInt(const APInt &, unsigned)> KOF) {
886   unsigned BitWidth = Known.getBitWidth();
887 
888   if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
889     unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
890 
891     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
892     Known.Zero = KZF(Known.Zero, ShiftAmt);
893     Known.One  = KOF(Known.One, ShiftAmt);
894     // If the known bits conflict, this must be an overflowing left shift, so
895     // the shift result is poison. We can return anything we want. Choose 0 for
896     // the best folding opportunity.
897     if (Known.hasConflict())
898       Known.setAllZero();
899 
900     return;
901   }
902 
903   computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
904 
905   // If the shift amount could be greater than or equal to the bit-width of the
906   // LHS, the value could be poison, but bail out because the check below is
907   // expensive. TODO: Should we just carry on?
908   if ((~Known.Zero).uge(BitWidth)) {
909     Known.resetAll();
910     return;
911   }
912 
913   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
914   // BitWidth > 64 and any upper bits are known, we'll end up returning the
915   // limit value (which implies all bits are known).
916   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
917   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
918 
919   // It would be more-clearly correct to use the two temporaries for this
920   // calculation. Reusing the APInts here to prevent unnecessary allocations.
921   Known.resetAll();
922 
923   // If we know the shifter operand is nonzero, we can sometimes infer more
924   // known bits. However this is expensive to compute, so be lazy about it and
925   // only compute it when absolutely necessary.
926   Optional<bool> ShifterOperandIsNonZero;
927 
928   // Early exit if we can't constrain any well-defined shift amount.
929   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
930       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
931     ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
932     if (!*ShifterOperandIsNonZero)
933       return;
934   }
935 
936   computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
937 
938   Known.Zero.setAllBits();
939   Known.One.setAllBits();
940   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
941     // Combine the shifted known input bits only for those shift amounts
942     // compatible with its known constraints.
943     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
944       continue;
945     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
946       continue;
947     // If we know the shifter is nonzero, we may be able to infer more known
948     // bits. This check is sunk down as far as possible to avoid the expensive
949     // call to isKnownNonZero if the cheaper checks above fail.
950     if (ShiftAmt == 0) {
951       if (!ShifterOperandIsNonZero.hasValue())
952         ShifterOperandIsNonZero =
953             isKnownNonZero(I->getOperand(1), Depth + 1, Q);
954       if (*ShifterOperandIsNonZero)
955         continue;
956     }
957 
958     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
959     Known.One  &= KOF(Known2.One, ShiftAmt);
960   }
961 
962   // If the known bits conflict, the result is poison. Return a 0 and hope the
963   // caller can further optimize that.
964   if (Known.hasConflict())
965     Known.setAllZero();
966 }
967 
968 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
969                                          unsigned Depth, const Query &Q) {
970   unsigned BitWidth = Known.getBitWidth();
971 
972   KnownBits Known2(Known);
973   switch (I->getOpcode()) {
974   default: break;
975   case Instruction::Load:
976     if (MDNode *MD =
977             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
978       computeKnownBitsFromRangeMetadata(*MD, Known);
979     break;
980   case Instruction::And: {
981     // If either the LHS or the RHS are Zero, the result is zero.
982     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
983     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
984 
985     // Output known-1 bits are only known if set in both the LHS & RHS.
986     Known.One &= Known2.One;
987     // Output known-0 are known to be clear if zero in either the LHS | RHS.
988     Known.Zero |= Known2.Zero;
989 
990     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
991     // here we handle the more general case of adding any odd number by
992     // matching the form add(x, add(x, y)) where y is odd.
993     // TODO: This could be generalized to clearing any bit set in y where the
994     // following bit is known to be unset in y.
995     Value *X = nullptr, *Y = nullptr;
996     if (!Known.Zero[0] && !Known.One[0] &&
997         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
998       Known2.resetAll();
999       computeKnownBits(Y, Known2, Depth + 1, Q);
1000       if (Known2.countMinTrailingOnes() > 0)
1001         Known.Zero.setBit(0);
1002     }
1003     break;
1004   }
1005   case Instruction::Or:
1006     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1007     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1008 
1009     // Output known-0 bits are only known if clear in both the LHS & RHS.
1010     Known.Zero &= Known2.Zero;
1011     // Output known-1 are known to be set if set in either the LHS | RHS.
1012     Known.One |= Known2.One;
1013     break;
1014   case Instruction::Xor: {
1015     computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
1016     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1017 
1018     // Output known-0 bits are known if clear or set in both the LHS & RHS.
1019     APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
1020     // Output known-1 are known to be set if set in only one of the LHS, RHS.
1021     Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
1022     Known.Zero = std::move(KnownZeroOut);
1023     break;
1024   }
1025   case Instruction::Mul: {
1026     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1027     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
1028                         Known2, Depth, Q);
1029     break;
1030   }
1031   case Instruction::UDiv: {
1032     // For the purposes of computing leading zeros we can conservatively
1033     // treat a udiv as a logical right shift by the power of 2 known to
1034     // be less than the denominator.
1035     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1036     unsigned LeadZ = Known2.countMinLeadingZeros();
1037 
1038     Known2.resetAll();
1039     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1040     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1041     if (RHSMaxLeadingZeros != BitWidth)
1042       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1043 
1044     Known.Zero.setHighBits(LeadZ);
1045     break;
1046   }
1047   case Instruction::Select: {
1048     const Value *LHS, *RHS;
1049     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1050     if (SelectPatternResult::isMinOrMax(SPF)) {
1051       computeKnownBits(RHS, Known, Depth + 1, Q);
1052       computeKnownBits(LHS, Known2, Depth + 1, Q);
1053     } else {
1054       computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1055       computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1056     }
1057 
1058     unsigned MaxHighOnes = 0;
1059     unsigned MaxHighZeros = 0;
1060     if (SPF == SPF_SMAX) {
1061       // If both sides are negative, the result is negative.
1062       if (Known.isNegative() && Known2.isNegative())
1063         // We can derive a lower bound on the result by taking the max of the
1064         // leading one bits.
1065         MaxHighOnes =
1066             std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1067       // If either side is non-negative, the result is non-negative.
1068       else if (Known.isNonNegative() || Known2.isNonNegative())
1069         MaxHighZeros = 1;
1070     } else if (SPF == SPF_SMIN) {
1071       // If both sides are non-negative, the result is non-negative.
1072       if (Known.isNonNegative() && Known2.isNonNegative())
1073         // We can derive an upper bound on the result by taking the max of the
1074         // leading zero bits.
1075         MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1076                                 Known2.countMinLeadingZeros());
1077       // If either side is negative, the result is negative.
1078       else if (Known.isNegative() || Known2.isNegative())
1079         MaxHighOnes = 1;
1080     } else if (SPF == SPF_UMAX) {
1081       // We can derive a lower bound on the result by taking the max of the
1082       // leading one bits.
1083       MaxHighOnes =
1084           std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1085     } else if (SPF == SPF_UMIN) {
1086       // We can derive an upper bound on the result by taking the max of the
1087       // leading zero bits.
1088       MaxHighZeros =
1089           std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1090     } else if (SPF == SPF_ABS) {
1091       // RHS from matchSelectPattern returns the negation part of abs pattern.
1092       // If the negate has an NSW flag we can assume the sign bit of the result
1093       // will be 0 because that makes abs(INT_MIN) undefined.
1094       if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1095         MaxHighZeros = 1;
1096     }
1097 
1098     // Only known if known in both the LHS and RHS.
1099     Known.One &= Known2.One;
1100     Known.Zero &= Known2.Zero;
1101     if (MaxHighOnes > 0)
1102       Known.One.setHighBits(MaxHighOnes);
1103     if (MaxHighZeros > 0)
1104       Known.Zero.setHighBits(MaxHighZeros);
1105     break;
1106   }
1107   case Instruction::FPTrunc:
1108   case Instruction::FPExt:
1109   case Instruction::FPToUI:
1110   case Instruction::FPToSI:
1111   case Instruction::SIToFP:
1112   case Instruction::UIToFP:
1113     break; // Can't work with floating point.
1114   case Instruction::PtrToInt:
1115   case Instruction::IntToPtr:
1116     // Fall through and handle them the same as zext/trunc.
1117     LLVM_FALLTHROUGH;
1118   case Instruction::ZExt:
1119   case Instruction::Trunc: {
1120     Type *SrcTy = I->getOperand(0)->getType();
1121 
1122     unsigned SrcBitWidth;
1123     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1124     // which fall through here.
1125     Type *ScalarTy = SrcTy->getScalarType();
1126     SrcBitWidth = ScalarTy->isPointerTy() ?
1127       Q.DL.getIndexTypeSizeInBits(ScalarTy) :
1128       Q.DL.getTypeSizeInBits(ScalarTy);
1129 
1130     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1131     Known = Known.zextOrTrunc(SrcBitWidth);
1132     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1133     Known = Known.zextOrTrunc(BitWidth);
1134     // Any top bits are known to be zero.
1135     if (BitWidth > SrcBitWidth)
1136       Known.Zero.setBitsFrom(SrcBitWidth);
1137     break;
1138   }
1139   case Instruction::BitCast: {
1140     Type *SrcTy = I->getOperand(0)->getType();
1141     if (SrcTy->isIntOrPtrTy() &&
1142         // TODO: For now, not handling conversions like:
1143         // (bitcast i64 %x to <2 x i32>)
1144         !I->getType()->isVectorTy()) {
1145       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1146       break;
1147     }
1148     break;
1149   }
1150   case Instruction::SExt: {
1151     // Compute the bits in the result that are not present in the input.
1152     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1153 
1154     Known = Known.trunc(SrcBitWidth);
1155     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1156     // If the sign bit of the input is known set or clear, then we know the
1157     // top bits of the result.
1158     Known = Known.sext(BitWidth);
1159     break;
1160   }
1161   case Instruction::Shl: {
1162     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1163     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1164     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1165       APInt KZResult = KnownZero << ShiftAmt;
1166       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1167       // If this shift has "nsw" keyword, then the result is either a poison
1168       // value or has the same sign bit as the first operand.
1169       if (NSW && KnownZero.isSignBitSet())
1170         KZResult.setSignBit();
1171       return KZResult;
1172     };
1173 
1174     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1175       APInt KOResult = KnownOne << ShiftAmt;
1176       if (NSW && KnownOne.isSignBitSet())
1177         KOResult.setSignBit();
1178       return KOResult;
1179     };
1180 
1181     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1182     break;
1183   }
1184   case Instruction::LShr: {
1185     // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1186     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1187       APInt KZResult = KnownZero.lshr(ShiftAmt);
1188       // High bits known zero.
1189       KZResult.setHighBits(ShiftAmt);
1190       return KZResult;
1191     };
1192 
1193     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1194       return KnownOne.lshr(ShiftAmt);
1195     };
1196 
1197     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1198     break;
1199   }
1200   case Instruction::AShr: {
1201     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1202     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1203       return KnownZero.ashr(ShiftAmt);
1204     };
1205 
1206     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1207       return KnownOne.ashr(ShiftAmt);
1208     };
1209 
1210     computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
1211     break;
1212   }
1213   case Instruction::Sub: {
1214     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1215     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1216                            Known, Known2, Depth, Q);
1217     break;
1218   }
1219   case Instruction::Add: {
1220     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1221     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1222                            Known, Known2, Depth, Q);
1223     break;
1224   }
1225   case Instruction::SRem:
1226     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1227       APInt RA = Rem->getValue().abs();
1228       if (RA.isPowerOf2()) {
1229         APInt LowBits = RA - 1;
1230         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1231 
1232         // The low bits of the first operand are unchanged by the srem.
1233         Known.Zero = Known2.Zero & LowBits;
1234         Known.One = Known2.One & LowBits;
1235 
1236         // If the first operand is non-negative or has all low bits zero, then
1237         // the upper bits are all zero.
1238         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1239           Known.Zero |= ~LowBits;
1240 
1241         // If the first operand is negative and not all low bits are zero, then
1242         // the upper bits are all one.
1243         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1244           Known.One |= ~LowBits;
1245 
1246         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1247         break;
1248       }
1249     }
1250 
1251     // The sign bit is the LHS's sign bit, except when the result of the
1252     // remainder is zero.
1253     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1254     // If it's known zero, our sign bit is also zero.
1255     if (Known2.isNonNegative())
1256       Known.makeNonNegative();
1257 
1258     break;
1259   case Instruction::URem: {
1260     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1261       const APInt &RA = Rem->getValue();
1262       if (RA.isPowerOf2()) {
1263         APInt LowBits = (RA - 1);
1264         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1265         Known.Zero |= ~LowBits;
1266         Known.One &= LowBits;
1267         break;
1268       }
1269     }
1270 
1271     // Since the result is less than or equal to either operand, any leading
1272     // zero bits in either operand must also exist in the result.
1273     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1274     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1275 
1276     unsigned Leaders =
1277         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1278     Known.resetAll();
1279     Known.Zero.setHighBits(Leaders);
1280     break;
1281   }
1282 
1283   case Instruction::Alloca: {
1284     const AllocaInst *AI = cast<AllocaInst>(I);
1285     unsigned Align = AI->getAlignment();
1286     if (Align == 0)
1287       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
1288 
1289     if (Align > 0)
1290       Known.Zero.setLowBits(countTrailingZeros(Align));
1291     break;
1292   }
1293   case Instruction::GetElementPtr: {
1294     // Analyze all of the subscripts of this getelementptr instruction
1295     // to determine if we can prove known low zero bits.
1296     KnownBits LocalKnown(BitWidth);
1297     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1298     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1299 
1300     gep_type_iterator GTI = gep_type_begin(I);
1301     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1302       Value *Index = I->getOperand(i);
1303       if (StructType *STy = GTI.getStructTypeOrNull()) {
1304         // Handle struct member offset arithmetic.
1305 
1306         // Handle case when index is vector zeroinitializer
1307         Constant *CIndex = cast<Constant>(Index);
1308         if (CIndex->isZeroValue())
1309           continue;
1310 
1311         if (CIndex->getType()->isVectorTy())
1312           Index = CIndex->getSplatValue();
1313 
1314         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1315         const StructLayout *SL = Q.DL.getStructLayout(STy);
1316         uint64_t Offset = SL->getElementOffset(Idx);
1317         TrailZ = std::min<unsigned>(TrailZ,
1318                                     countTrailingZeros(Offset));
1319       } else {
1320         // Handle array index arithmetic.
1321         Type *IndexedTy = GTI.getIndexedType();
1322         if (!IndexedTy->isSized()) {
1323           TrailZ = 0;
1324           break;
1325         }
1326         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1327         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1328         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1329         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1330         TrailZ = std::min(TrailZ,
1331                           unsigned(countTrailingZeros(TypeSize) +
1332                                    LocalKnown.countMinTrailingZeros()));
1333       }
1334     }
1335 
1336     Known.Zero.setLowBits(TrailZ);
1337     break;
1338   }
1339   case Instruction::PHI: {
1340     const PHINode *P = cast<PHINode>(I);
1341     // Handle the case of a simple two-predecessor recurrence PHI.
1342     // There's a lot more that could theoretically be done here, but
1343     // this is sufficient to catch some interesting cases.
1344     if (P->getNumIncomingValues() == 2) {
1345       for (unsigned i = 0; i != 2; ++i) {
1346         Value *L = P->getIncomingValue(i);
1347         Value *R = P->getIncomingValue(!i);
1348         Operator *LU = dyn_cast<Operator>(L);
1349         if (!LU)
1350           continue;
1351         unsigned Opcode = LU->getOpcode();
1352         // Check for operations that have the property that if
1353         // both their operands have low zero bits, the result
1354         // will have low zero bits.
1355         if (Opcode == Instruction::Add ||
1356             Opcode == Instruction::Sub ||
1357             Opcode == Instruction::And ||
1358             Opcode == Instruction::Or ||
1359             Opcode == Instruction::Mul) {
1360           Value *LL = LU->getOperand(0);
1361           Value *LR = LU->getOperand(1);
1362           // Find a recurrence.
1363           if (LL == I)
1364             L = LR;
1365           else if (LR == I)
1366             L = LL;
1367           else
1368             break;
1369           // Ok, we have a PHI of the form L op= R. Check for low
1370           // zero bits.
1371           computeKnownBits(R, Known2, Depth + 1, Q);
1372 
1373           // We need to take the minimum number of known bits
1374           KnownBits Known3(Known);
1375           computeKnownBits(L, Known3, Depth + 1, Q);
1376 
1377           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1378                                          Known3.countMinTrailingZeros()));
1379 
1380           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1381           if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1382             // If initial value of recurrence is nonnegative, and we are adding
1383             // a nonnegative number with nsw, the result can only be nonnegative
1384             // or poison value regardless of the number of times we execute the
1385             // add in phi recurrence. If initial value is negative and we are
1386             // adding a negative number with nsw, the result can only be
1387             // negative or poison value. Similar arguments apply to sub and mul.
1388             //
1389             // (add non-negative, non-negative) --> non-negative
1390             // (add negative, negative) --> negative
1391             if (Opcode == Instruction::Add) {
1392               if (Known2.isNonNegative() && Known3.isNonNegative())
1393                 Known.makeNonNegative();
1394               else if (Known2.isNegative() && Known3.isNegative())
1395                 Known.makeNegative();
1396             }
1397 
1398             // (sub nsw non-negative, negative) --> non-negative
1399             // (sub nsw negative, non-negative) --> negative
1400             else if (Opcode == Instruction::Sub && LL == I) {
1401               if (Known2.isNonNegative() && Known3.isNegative())
1402                 Known.makeNonNegative();
1403               else if (Known2.isNegative() && Known3.isNonNegative())
1404                 Known.makeNegative();
1405             }
1406 
1407             // (mul nsw non-negative, non-negative) --> non-negative
1408             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1409                      Known3.isNonNegative())
1410               Known.makeNonNegative();
1411           }
1412 
1413           break;
1414         }
1415       }
1416     }
1417 
1418     // Unreachable blocks may have zero-operand PHI nodes.
1419     if (P->getNumIncomingValues() == 0)
1420       break;
1421 
1422     // Otherwise take the unions of the known bit sets of the operands,
1423     // taking conservative care to avoid excessive recursion.
1424     if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1425       // Skip if every incoming value references to ourself.
1426       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1427         break;
1428 
1429       Known.Zero.setAllBits();
1430       Known.One.setAllBits();
1431       for (Value *IncValue : P->incoming_values()) {
1432         // Skip direct self references.
1433         if (IncValue == P) continue;
1434 
1435         Known2 = KnownBits(BitWidth);
1436         // Recurse, but cap the recursion to one level, because we don't
1437         // want to waste time spinning around in loops.
1438         computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
1439         Known.Zero &= Known2.Zero;
1440         Known.One &= Known2.One;
1441         // If all bits have been ruled out, there's no need to check
1442         // more operands.
1443         if (!Known.Zero && !Known.One)
1444           break;
1445       }
1446     }
1447     break;
1448   }
1449   case Instruction::Call:
1450   case Instruction::Invoke:
1451     // If range metadata is attached to this call, set known bits from that,
1452     // and then intersect with known bits based on other properties of the
1453     // function.
1454     if (MDNode *MD =
1455             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1456       computeKnownBitsFromRangeMetadata(*MD, Known);
1457     if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
1458       computeKnownBits(RV, Known2, Depth + 1, Q);
1459       Known.Zero |= Known2.Zero;
1460       Known.One |= Known2.One;
1461     }
1462     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1463       switch (II->getIntrinsicID()) {
1464       default: break;
1465       case Intrinsic::bitreverse:
1466         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1467         Known.Zero |= Known2.Zero.reverseBits();
1468         Known.One |= Known2.One.reverseBits();
1469         break;
1470       case Intrinsic::bswap:
1471         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1472         Known.Zero |= Known2.Zero.byteSwap();
1473         Known.One |= Known2.One.byteSwap();
1474         break;
1475       case Intrinsic::ctlz: {
1476         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1477         // If we have a known 1, its position is our upper bound.
1478         unsigned PossibleLZ = Known2.One.countLeadingZeros();
1479         // If this call is undefined for 0, the result will be less than 2^n.
1480         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1481           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1482         unsigned LowBits = Log2_32(PossibleLZ)+1;
1483         Known.Zero.setBitsFrom(LowBits);
1484         break;
1485       }
1486       case Intrinsic::cttz: {
1487         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1488         // If we have a known 1, its position is our upper bound.
1489         unsigned PossibleTZ = Known2.One.countTrailingZeros();
1490         // If this call is undefined for 0, the result will be less than 2^n.
1491         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1492           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1493         unsigned LowBits = Log2_32(PossibleTZ)+1;
1494         Known.Zero.setBitsFrom(LowBits);
1495         break;
1496       }
1497       case Intrinsic::ctpop: {
1498         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1499         // We can bound the space the count needs.  Also, bits known to be zero
1500         // can't contribute to the population.
1501         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1502         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1503         Known.Zero.setBitsFrom(LowBits);
1504         // TODO: we could bound KnownOne using the lower bound on the number
1505         // of bits which might be set provided by popcnt KnownOne2.
1506         break;
1507       }
1508       case Intrinsic::x86_sse42_crc32_64_64:
1509         Known.Zero.setBitsFrom(32);
1510         break;
1511       }
1512     }
1513     break;
1514   case Instruction::ExtractElement:
1515     // Look through extract element. At the moment we keep this simple and skip
1516     // tracking the specific element. But at least we might find information
1517     // valid for all elements of the vector (for example if vector is sign
1518     // extended, shifted, etc).
1519     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1520     break;
1521   case Instruction::ExtractValue:
1522     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1523       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1524       if (EVI->getNumIndices() != 1) break;
1525       if (EVI->getIndices()[0] == 0) {
1526         switch (II->getIntrinsicID()) {
1527         default: break;
1528         case Intrinsic::uadd_with_overflow:
1529         case Intrinsic::sadd_with_overflow:
1530           computeKnownBitsAddSub(true, II->getArgOperand(0),
1531                                  II->getArgOperand(1), false, Known, Known2,
1532                                  Depth, Q);
1533           break;
1534         case Intrinsic::usub_with_overflow:
1535         case Intrinsic::ssub_with_overflow:
1536           computeKnownBitsAddSub(false, II->getArgOperand(0),
1537                                  II->getArgOperand(1), false, Known, Known2,
1538                                  Depth, Q);
1539           break;
1540         case Intrinsic::umul_with_overflow:
1541         case Intrinsic::smul_with_overflow:
1542           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1543                               Known, Known2, Depth, Q);
1544           break;
1545         }
1546       }
1547     }
1548   }
1549 }
1550 
1551 /// Determine which bits of V are known to be either zero or one and return
1552 /// them.
1553 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1554   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1555   computeKnownBits(V, Known, Depth, Q);
1556   return Known;
1557 }
1558 
1559 /// Determine which bits of V are known to be either zero or one and return
1560 /// them in the Known bit set.
1561 ///
1562 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1563 /// we cannot optimize based on the assumption that it is zero without changing
1564 /// it to be an explicit zero.  If we don't change it to zero, other code could
1565 /// optimized based on the contradictory assumption that it is non-zero.
1566 /// Because instcombine aggressively folds operations with undef args anyway,
1567 /// this won't lose us code quality.
1568 ///
1569 /// This function is defined on values with integer type, values with pointer
1570 /// type, and vectors of integers.  In the case
1571 /// where V is a vector, known zero, and known one values are the
1572 /// same width as the vector element, and the bit is set only if it is true
1573 /// for all of the elements in the vector.
1574 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1575                       const Query &Q) {
1576   assert(V && "No Value?");
1577   assert(Depth <= MaxDepth && "Limit Search Depth");
1578   unsigned BitWidth = Known.getBitWidth();
1579 
1580   assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1581           V->getType()->isPtrOrPtrVectorTy()) &&
1582          "Not integer or pointer type!");
1583 
1584   Type *ScalarTy = V->getType()->getScalarType();
1585   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1586     Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1587   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1588   (void)BitWidth;
1589   (void)ExpectedWidth;
1590 
1591   const APInt *C;
1592   if (match(V, m_APInt(C))) {
1593     // We know all of the bits for a scalar constant or a splat vector constant!
1594     Known.One = *C;
1595     Known.Zero = ~Known.One;
1596     return;
1597   }
1598   // Null and aggregate-zero are all-zeros.
1599   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1600     Known.setAllZero();
1601     return;
1602   }
1603   // Handle a constant vector by taking the intersection of the known bits of
1604   // each element.
1605   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1606     // We know that CDS must be a vector of integers. Take the intersection of
1607     // each element.
1608     Known.Zero.setAllBits(); Known.One.setAllBits();
1609     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1610       APInt Elt = CDS->getElementAsAPInt(i);
1611       Known.Zero &= ~Elt;
1612       Known.One &= Elt;
1613     }
1614     return;
1615   }
1616 
1617   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1618     // We know that CV must be a vector of integers. Take the intersection of
1619     // each element.
1620     Known.Zero.setAllBits(); Known.One.setAllBits();
1621     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1622       Constant *Element = CV->getAggregateElement(i);
1623       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1624       if (!ElementCI) {
1625         Known.resetAll();
1626         return;
1627       }
1628       const APInt &Elt = ElementCI->getValue();
1629       Known.Zero &= ~Elt;
1630       Known.One &= Elt;
1631     }
1632     return;
1633   }
1634 
1635   // Start out not knowing anything.
1636   Known.resetAll();
1637 
1638   // We can't imply anything about undefs.
1639   if (isa<UndefValue>(V))
1640     return;
1641 
1642   // There's no point in looking through other users of ConstantData for
1643   // assumptions.  Confirm that we've handled them all.
1644   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1645 
1646   // Limit search depth.
1647   // All recursive calls that increase depth must come after this.
1648   if (Depth == MaxDepth)
1649     return;
1650 
1651   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1652   // the bits of its aliasee.
1653   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1654     if (!GA->isInterposable())
1655       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1656     return;
1657   }
1658 
1659   if (const Operator *I = dyn_cast<Operator>(V))
1660     computeKnownBitsFromOperator(I, Known, Depth, Q);
1661 
1662   // Aligned pointers have trailing zeros - refine Known.Zero set
1663   if (V->getType()->isPointerTy()) {
1664     unsigned Align = V->getPointerAlignment(Q.DL);
1665     if (Align)
1666       Known.Zero.setLowBits(countTrailingZeros(Align));
1667   }
1668 
1669   // computeKnownBitsFromAssume strictly refines Known.
1670   // Therefore, we run them after computeKnownBitsFromOperator.
1671 
1672   // Check whether a nearby assume intrinsic can determine some known bits.
1673   computeKnownBitsFromAssume(V, Known, Depth, Q);
1674 
1675   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1676 }
1677 
1678 /// Return true if the given value is known to have exactly one
1679 /// bit set when defined. For vectors return true if every element is known to
1680 /// be a power of two when defined. Supports values with integer or pointer
1681 /// types and vectors of integers.
1682 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1683                             const Query &Q) {
1684   assert(Depth <= MaxDepth && "Limit Search Depth");
1685 
1686   // Attempt to match against constants.
1687   if (OrZero && match(V, m_Power2OrZero()))
1688       return true;
1689   if (match(V, m_Power2()))
1690       return true;
1691 
1692   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1693   // it is shifted off the end then the result is undefined.
1694   if (match(V, m_Shl(m_One(), m_Value())))
1695     return true;
1696 
1697   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1698   // the bottom.  If it is shifted off the bottom then the result is undefined.
1699   if (match(V, m_LShr(m_SignMask(), m_Value())))
1700     return true;
1701 
1702   // The remaining tests are all recursive, so bail out if we hit the limit.
1703   if (Depth++ == MaxDepth)
1704     return false;
1705 
1706   Value *X = nullptr, *Y = nullptr;
1707   // A shift left or a logical shift right of a power of two is a power of two
1708   // or zero.
1709   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1710                  match(V, m_LShr(m_Value(X), m_Value()))))
1711     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1712 
1713   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1714     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1715 
1716   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1717     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1718            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1719 
1720   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1721     // A power of two and'd with anything is a power of two or zero.
1722     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1723         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1724       return true;
1725     // X & (-X) is always a power of two or zero.
1726     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1727       return true;
1728     return false;
1729   }
1730 
1731   // Adding a power-of-two or zero to the same power-of-two or zero yields
1732   // either the original power-of-two, a larger power-of-two or zero.
1733   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1734     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1735     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1736         Q.IIQ.hasNoSignedWrap(VOBO)) {
1737       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1738           match(X, m_And(m_Value(), m_Specific(Y))))
1739         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1740           return true;
1741       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1742           match(Y, m_And(m_Value(), m_Specific(X))))
1743         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1744           return true;
1745 
1746       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1747       KnownBits LHSBits(BitWidth);
1748       computeKnownBits(X, LHSBits, Depth, Q);
1749 
1750       KnownBits RHSBits(BitWidth);
1751       computeKnownBits(Y, RHSBits, Depth, Q);
1752       // If i8 V is a power of two or zero:
1753       //  ZeroBits: 1 1 1 0 1 1 1 1
1754       // ~ZeroBits: 0 0 0 1 0 0 0 0
1755       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1756         // If OrZero isn't set, we cannot give back a zero result.
1757         // Make sure either the LHS or RHS has a bit set.
1758         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1759           return true;
1760     }
1761   }
1762 
1763   // An exact divide or right shift can only shift off zero bits, so the result
1764   // is a power of two only if the first operand is a power of two and not
1765   // copying a sign bit (sdiv int_min, 2).
1766   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1767       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1768     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1769                                   Depth, Q);
1770   }
1771 
1772   return false;
1773 }
1774 
1775 /// Test whether a GEP's result is known to be non-null.
1776 ///
1777 /// Uses properties inherent in a GEP to try to determine whether it is known
1778 /// to be non-null.
1779 ///
1780 /// Currently this routine does not support vector GEPs.
1781 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1782                               const Query &Q) {
1783   const Function *F = nullptr;
1784   if (const Instruction *I = dyn_cast<Instruction>(GEP))
1785     F = I->getFunction();
1786 
1787   if (!GEP->isInBounds() ||
1788       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1789     return false;
1790 
1791   // FIXME: Support vector-GEPs.
1792   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1793 
1794   // If the base pointer is non-null, we cannot walk to a null address with an
1795   // inbounds GEP in address space zero.
1796   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1797     return true;
1798 
1799   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1800   // If so, then the GEP cannot produce a null pointer, as doing so would
1801   // inherently violate the inbounds contract within address space zero.
1802   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1803        GTI != GTE; ++GTI) {
1804     // Struct types are easy -- they must always be indexed by a constant.
1805     if (StructType *STy = GTI.getStructTypeOrNull()) {
1806       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1807       unsigned ElementIdx = OpC->getZExtValue();
1808       const StructLayout *SL = Q.DL.getStructLayout(STy);
1809       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1810       if (ElementOffset > 0)
1811         return true;
1812       continue;
1813     }
1814 
1815     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1816     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1817       continue;
1818 
1819     // Fast path the constant operand case both for efficiency and so we don't
1820     // increment Depth when just zipping down an all-constant GEP.
1821     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1822       if (!OpC->isZero())
1823         return true;
1824       continue;
1825     }
1826 
1827     // We post-increment Depth here because while isKnownNonZero increments it
1828     // as well, when we pop back up that increment won't persist. We don't want
1829     // to recurse 10k times just because we have 10k GEP operands. We don't
1830     // bail completely out because we want to handle constant GEPs regardless
1831     // of depth.
1832     if (Depth++ >= MaxDepth)
1833       continue;
1834 
1835     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1836       return true;
1837   }
1838 
1839   return false;
1840 }
1841 
1842 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1843                                                   const Instruction *CtxI,
1844                                                   const DominatorTree *DT) {
1845   assert(V->getType()->isPointerTy() && "V must be pointer type");
1846   assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1847 
1848   if (!CtxI || !DT)
1849     return false;
1850 
1851   unsigned NumUsesExplored = 0;
1852   for (auto *U : V->users()) {
1853     // Avoid massive lists
1854     if (NumUsesExplored >= DomConditionsMaxUses)
1855       break;
1856     NumUsesExplored++;
1857 
1858     // If the value is used as an argument to a call or invoke, then argument
1859     // attributes may provide an answer about null-ness.
1860     if (auto CS = ImmutableCallSite(U))
1861       if (auto *CalledFunc = CS.getCalledFunction())
1862         for (const Argument &Arg : CalledFunc->args())
1863           if (CS.getArgOperand(Arg.getArgNo()) == V &&
1864               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1865             return true;
1866 
1867     // Consider only compare instructions uniquely controlling a branch
1868     CmpInst::Predicate Pred;
1869     if (!match(const_cast<User *>(U),
1870                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1871         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1872       continue;
1873 
1874     SmallVector<const User *, 4> WorkList;
1875     SmallPtrSet<const User *, 4> Visited;
1876     for (auto *CmpU : U->users()) {
1877       assert(WorkList.empty() && "Should be!");
1878       if (Visited.insert(CmpU).second)
1879         WorkList.push_back(CmpU);
1880 
1881       while (!WorkList.empty()) {
1882         auto *Curr = WorkList.pop_back_val();
1883 
1884         // If a user is an AND, add all its users to the work list. We only
1885         // propagate "pred != null" condition through AND because it is only
1886         // correct to assume that all conditions of AND are met in true branch.
1887         // TODO: Support similar logic of OR and EQ predicate?
1888         if (Pred == ICmpInst::ICMP_NE)
1889           if (auto *BO = dyn_cast<BinaryOperator>(Curr))
1890             if (BO->getOpcode() == Instruction::And) {
1891               for (auto *BOU : BO->users())
1892                 if (Visited.insert(BOU).second)
1893                   WorkList.push_back(BOU);
1894               continue;
1895             }
1896 
1897         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
1898           assert(BI->isConditional() && "uses a comparison!");
1899 
1900           BasicBlock *NonNullSuccessor =
1901               BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1902           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1903           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1904             return true;
1905         } else if (Pred == ICmpInst::ICMP_NE &&
1906                    match(Curr, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1907                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
1908           return true;
1909         }
1910       }
1911     }
1912   }
1913 
1914   return false;
1915 }
1916 
1917 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1918 /// ensure that the value it's attached to is never Value?  'RangeType' is
1919 /// is the type of the value described by the range.
1920 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1921   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1922   assert(NumRanges >= 1);
1923   for (unsigned i = 0; i < NumRanges; ++i) {
1924     ConstantInt *Lower =
1925         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1926     ConstantInt *Upper =
1927         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1928     ConstantRange Range(Lower->getValue(), Upper->getValue());
1929     if (Range.contains(Value))
1930       return false;
1931   }
1932   return true;
1933 }
1934 
1935 /// Return true if the given value is known to be non-zero when defined. For
1936 /// vectors, return true if every element is known to be non-zero when
1937 /// defined. For pointers, if the context instruction and dominator tree are
1938 /// specified, perform context-sensitive analysis and return true if the
1939 /// pointer couldn't possibly be null at the specified instruction.
1940 /// Supports values with integer or pointer type and vectors of integers.
1941 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1942   if (auto *C = dyn_cast<Constant>(V)) {
1943     if (C->isNullValue())
1944       return false;
1945     if (isa<ConstantInt>(C))
1946       // Must be non-zero due to null test above.
1947       return true;
1948 
1949     // For constant vectors, check that all elements are undefined or known
1950     // non-zero to determine that the whole vector is known non-zero.
1951     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1952       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1953         Constant *Elt = C->getAggregateElement(i);
1954         if (!Elt || Elt->isNullValue())
1955           return false;
1956         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1957           return false;
1958       }
1959       return true;
1960     }
1961 
1962     // A global variable in address space 0 is non null unless extern weak
1963     // or an absolute symbol reference. Other address spaces may have null as a
1964     // valid address for a global, so we can't assume anything.
1965     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1966       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1967           GV->getType()->getAddressSpace() == 0)
1968         return true;
1969     } else
1970       return false;
1971   }
1972 
1973   if (auto *I = dyn_cast<Instruction>(V)) {
1974     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
1975       // If the possible ranges don't contain zero, then the value is
1976       // definitely non-zero.
1977       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1978         const APInt ZeroValue(Ty->getBitWidth(), 0);
1979         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1980           return true;
1981       }
1982     }
1983   }
1984 
1985   // Some of the tests below are recursive, so bail out if we hit the limit.
1986   if (Depth++ >= MaxDepth)
1987     return false;
1988 
1989   // Check for pointer simplifications.
1990   if (V->getType()->isPointerTy()) {
1991     // Alloca never returns null, malloc might.
1992     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
1993       return true;
1994 
1995     // A byval, inalloca, or nonnull argument is never null.
1996     if (const Argument *A = dyn_cast<Argument>(V))
1997       if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
1998         return true;
1999 
2000     // A Load tagged with nonnull metadata is never null.
2001     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2002       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2003         return true;
2004 
2005     if (auto CS = ImmutableCallSite(V)) {
2006       if (CS.isReturnNonNull())
2007         return true;
2008       if (const auto *RP = getArgumentAliasingToReturnedPointer(CS))
2009         return isKnownNonZero(RP, Depth, Q);
2010     }
2011   }
2012 
2013 
2014   // Check for recursive pointer simplifications.
2015   if (V->getType()->isPointerTy()) {
2016     if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2017       return true;
2018 
2019     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2020       if (isGEPKnownNonNull(GEP, Depth, Q))
2021         return true;
2022   }
2023 
2024   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2025 
2026   // X | Y != 0 if X != 0 or Y != 0.
2027   Value *X = nullptr, *Y = nullptr;
2028   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2029     return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
2030 
2031   // ext X != 0 if X != 0.
2032   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2033     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2034 
2035   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2036   // if the lowest bit is shifted off the end.
2037   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2038     // shl nuw can't remove any non-zero bits.
2039     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2040     if (Q.IIQ.hasNoUnsignedWrap(BO))
2041       return isKnownNonZero(X, Depth, Q);
2042 
2043     KnownBits Known(BitWidth);
2044     computeKnownBits(X, Known, Depth, Q);
2045     if (Known.One[0])
2046       return true;
2047   }
2048   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2049   // defined if the sign bit is shifted off the end.
2050   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2051     // shr exact can only shift out zero bits.
2052     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2053     if (BO->isExact())
2054       return isKnownNonZero(X, Depth, Q);
2055 
2056     KnownBits Known = computeKnownBits(X, Depth, Q);
2057     if (Known.isNegative())
2058       return true;
2059 
2060     // If the shifter operand is a constant, and all of the bits shifted
2061     // out are known to be zero, and X is known non-zero then at least one
2062     // non-zero bit must remain.
2063     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2064       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2065       // Is there a known one in the portion not shifted out?
2066       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2067         return true;
2068       // Are all the bits to be shifted out known zero?
2069       if (Known.countMinTrailingZeros() >= ShiftVal)
2070         return isKnownNonZero(X, Depth, Q);
2071     }
2072   }
2073   // div exact can only produce a zero if the dividend is zero.
2074   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2075     return isKnownNonZero(X, Depth, Q);
2076   }
2077   // X + Y.
2078   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2079     KnownBits XKnown = computeKnownBits(X, Depth, Q);
2080     KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2081 
2082     // If X and Y are both non-negative (as signed values) then their sum is not
2083     // zero unless both X and Y are zero.
2084     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2085       if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2086         return true;
2087 
2088     // If X and Y are both negative (as signed values) then their sum is not
2089     // zero unless both X and Y equal INT_MIN.
2090     if (XKnown.isNegative() && YKnown.isNegative()) {
2091       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2092       // The sign bit of X is set.  If some other bit is set then X is not equal
2093       // to INT_MIN.
2094       if (XKnown.One.intersects(Mask))
2095         return true;
2096       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2097       // to INT_MIN.
2098       if (YKnown.One.intersects(Mask))
2099         return true;
2100     }
2101 
2102     // The sum of a non-negative number and a power of two is not zero.
2103     if (XKnown.isNonNegative() &&
2104         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2105       return true;
2106     if (YKnown.isNonNegative() &&
2107         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2108       return true;
2109   }
2110   // X * Y.
2111   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2112     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2113     // If X and Y are non-zero then so is X * Y as long as the multiplication
2114     // does not overflow.
2115     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2116         isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2117       return true;
2118   }
2119   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2120   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2121     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2122         isKnownNonZero(SI->getFalseValue(), Depth, Q))
2123       return true;
2124   }
2125   // PHI
2126   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2127     // Try and detect a recurrence that monotonically increases from a
2128     // starting value, as these are common as induction variables.
2129     if (PN->getNumIncomingValues() == 2) {
2130       Value *Start = PN->getIncomingValue(0);
2131       Value *Induction = PN->getIncomingValue(1);
2132       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2133         std::swap(Start, Induction);
2134       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2135         if (!C->isZero() && !C->isNegative()) {
2136           ConstantInt *X;
2137           if (Q.IIQ.UseInstrInfo &&
2138               (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2139                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2140               !X->isNegative())
2141             return true;
2142         }
2143       }
2144     }
2145     // Check if all incoming values are non-zero constant.
2146     bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2147       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2148     });
2149     if (AllNonZeroConstants)
2150       return true;
2151   }
2152 
2153   KnownBits Known(BitWidth);
2154   computeKnownBits(V, Known, Depth, Q);
2155   return Known.One != 0;
2156 }
2157 
2158 /// Return true if V2 == V1 + X, where X is known non-zero.
2159 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2160   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2161   if (!BO || BO->getOpcode() != Instruction::Add)
2162     return false;
2163   Value *Op = nullptr;
2164   if (V2 == BO->getOperand(0))
2165     Op = BO->getOperand(1);
2166   else if (V2 == BO->getOperand(1))
2167     Op = BO->getOperand(0);
2168   else
2169     return false;
2170   return isKnownNonZero(Op, 0, Q);
2171 }
2172 
2173 /// Return true if it is known that V1 != V2.
2174 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2175   if (V1 == V2)
2176     return false;
2177   if (V1->getType() != V2->getType())
2178     // We can't look through casts yet.
2179     return false;
2180   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2181     return true;
2182 
2183   if (V1->getType()->isIntOrIntVectorTy()) {
2184     // Are any known bits in V1 contradictory to known bits in V2? If V1
2185     // has a known zero where V2 has a known one, they must not be equal.
2186     KnownBits Known1 = computeKnownBits(V1, 0, Q);
2187     KnownBits Known2 = computeKnownBits(V2, 0, Q);
2188 
2189     if (Known1.Zero.intersects(Known2.One) ||
2190         Known2.Zero.intersects(Known1.One))
2191       return true;
2192   }
2193   return false;
2194 }
2195 
2196 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2197 /// simplify operations downstream. Mask is known to be zero for bits that V
2198 /// cannot have.
2199 ///
2200 /// This function is defined on values with integer type, values with pointer
2201 /// type, and vectors of integers.  In the case
2202 /// where V is a vector, the mask, known zero, and known one values are the
2203 /// same width as the vector element, and the bit is set only if it is true
2204 /// for all of the elements in the vector.
2205 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2206                        const Query &Q) {
2207   KnownBits Known(Mask.getBitWidth());
2208   computeKnownBits(V, Known, Depth, Q);
2209   return Mask.isSubsetOf(Known.Zero);
2210 }
2211 
2212 /// For vector constants, loop over the elements and find the constant with the
2213 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2214 /// or if any element was not analyzed; otherwise, return the count for the
2215 /// element with the minimum number of sign bits.
2216 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2217                                                  unsigned TyBits) {
2218   const auto *CV = dyn_cast<Constant>(V);
2219   if (!CV || !CV->getType()->isVectorTy())
2220     return 0;
2221 
2222   unsigned MinSignBits = TyBits;
2223   unsigned NumElts = CV->getType()->getVectorNumElements();
2224   for (unsigned i = 0; i != NumElts; ++i) {
2225     // If we find a non-ConstantInt, bail out.
2226     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2227     if (!Elt)
2228       return 0;
2229 
2230     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2231   }
2232 
2233   return MinSignBits;
2234 }
2235 
2236 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2237                                        const Query &Q);
2238 
2239 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2240                                    const Query &Q) {
2241   unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2242   assert(Result > 0 && "At least one sign bit needs to be present!");
2243   return Result;
2244 }
2245 
2246 /// Return the number of times the sign bit of the register is replicated into
2247 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2248 /// (itself), but other cases can give us information. For example, immediately
2249 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2250 /// other, so we return 3. For vectors, return the number of sign bits for the
2251 /// vector element with the minimum number of known sign bits.
2252 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2253                                        const Query &Q) {
2254   assert(Depth <= MaxDepth && "Limit Search Depth");
2255 
2256   // We return the minimum number of sign bits that are guaranteed to be present
2257   // in V, so for undef we have to conservatively return 1.  We don't have the
2258   // same behavior for poison though -- that's a FIXME today.
2259 
2260   Type *ScalarTy = V->getType()->getScalarType();
2261   unsigned TyBits = ScalarTy->isPointerTy() ?
2262     Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2263     Q.DL.getTypeSizeInBits(ScalarTy);
2264 
2265   unsigned Tmp, Tmp2;
2266   unsigned FirstAnswer = 1;
2267 
2268   // Note that ConstantInt is handled by the general computeKnownBits case
2269   // below.
2270 
2271   if (Depth == MaxDepth)
2272     return 1;  // Limit search depth.
2273 
2274   const Operator *U = dyn_cast<Operator>(V);
2275   switch (Operator::getOpcode(V)) {
2276   default: break;
2277   case Instruction::SExt:
2278     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2279     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2280 
2281   case Instruction::SDiv: {
2282     const APInt *Denominator;
2283     // sdiv X, C -> adds log(C) sign bits.
2284     if (match(U->getOperand(1), m_APInt(Denominator))) {
2285 
2286       // Ignore non-positive denominator.
2287       if (!Denominator->isStrictlyPositive())
2288         break;
2289 
2290       // Calculate the incoming numerator bits.
2291       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2292 
2293       // Add floor(log(C)) bits to the numerator bits.
2294       return std::min(TyBits, NumBits + Denominator->logBase2());
2295     }
2296     break;
2297   }
2298 
2299   case Instruction::SRem: {
2300     const APInt *Denominator;
2301     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2302     // positive constant.  This let us put a lower bound on the number of sign
2303     // bits.
2304     if (match(U->getOperand(1), m_APInt(Denominator))) {
2305 
2306       // Ignore non-positive denominator.
2307       if (!Denominator->isStrictlyPositive())
2308         break;
2309 
2310       // Calculate the incoming numerator bits. SRem by a positive constant
2311       // can't lower the number of sign bits.
2312       unsigned NumrBits =
2313           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2314 
2315       // Calculate the leading sign bit constraints by examining the
2316       // denominator.  Given that the denominator is positive, there are two
2317       // cases:
2318       //
2319       //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
2320       //     (1 << ceilLogBase2(C)).
2321       //
2322       //  2. the numerator is negative.  Then the result range is (-C,0] and
2323       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2324       //
2325       // Thus a lower bound on the number of sign bits is `TyBits -
2326       // ceilLogBase2(C)`.
2327 
2328       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2329       return std::max(NumrBits, ResBits);
2330     }
2331     break;
2332   }
2333 
2334   case Instruction::AShr: {
2335     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2336     // ashr X, C   -> adds C sign bits.  Vectors too.
2337     const APInt *ShAmt;
2338     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2339       if (ShAmt->uge(TyBits))
2340         break;  // Bad shift.
2341       unsigned ShAmtLimited = ShAmt->getZExtValue();
2342       Tmp += ShAmtLimited;
2343       if (Tmp > TyBits) Tmp = TyBits;
2344     }
2345     return Tmp;
2346   }
2347   case Instruction::Shl: {
2348     const APInt *ShAmt;
2349     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2350       // shl destroys sign bits.
2351       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2352       if (ShAmt->uge(TyBits) ||      // Bad shift.
2353           ShAmt->uge(Tmp)) break;    // Shifted all sign bits out.
2354       Tmp2 = ShAmt->getZExtValue();
2355       return Tmp - Tmp2;
2356     }
2357     break;
2358   }
2359   case Instruction::And:
2360   case Instruction::Or:
2361   case Instruction::Xor:    // NOT is handled here.
2362     // Logical binary ops preserve the number of sign bits at the worst.
2363     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2364     if (Tmp != 1) {
2365       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2366       FirstAnswer = std::min(Tmp, Tmp2);
2367       // We computed what we know about the sign bits as our first
2368       // answer. Now proceed to the generic code that uses
2369       // computeKnownBits, and pick whichever answer is better.
2370     }
2371     break;
2372 
2373   case Instruction::Select:
2374     Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2375     if (Tmp == 1) break;
2376     Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2377     return std::min(Tmp, Tmp2);
2378 
2379   case Instruction::Add:
2380     // Add can have at most one carry bit.  Thus we know that the output
2381     // is, at worst, one more bit than the inputs.
2382     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2383     if (Tmp == 1) break;
2384 
2385     // Special case decrementing a value (ADD X, -1):
2386     if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2387       if (CRHS->isAllOnesValue()) {
2388         KnownBits Known(TyBits);
2389         computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2390 
2391         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2392         // sign bits set.
2393         if ((Known.Zero | 1).isAllOnesValue())
2394           return TyBits;
2395 
2396         // If we are subtracting one from a positive number, there is no carry
2397         // out of the result.
2398         if (Known.isNonNegative())
2399           return Tmp;
2400       }
2401 
2402     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2403     if (Tmp2 == 1) break;
2404     return std::min(Tmp, Tmp2)-1;
2405 
2406   case Instruction::Sub:
2407     Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2408     if (Tmp2 == 1) break;
2409 
2410     // Handle NEG.
2411     if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2412       if (CLHS->isNullValue()) {
2413         KnownBits Known(TyBits);
2414         computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2415         // If the input is known to be 0 or 1, the output is 0/-1, which is all
2416         // sign bits set.
2417         if ((Known.Zero | 1).isAllOnesValue())
2418           return TyBits;
2419 
2420         // If the input is known to be positive (the sign bit is known clear),
2421         // the output of the NEG has the same number of sign bits as the input.
2422         if (Known.isNonNegative())
2423           return Tmp2;
2424 
2425         // Otherwise, we treat this like a SUB.
2426       }
2427 
2428     // Sub can have at most one carry bit.  Thus we know that the output
2429     // is, at worst, one more bit than the inputs.
2430     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2431     if (Tmp == 1) break;
2432     return std::min(Tmp, Tmp2)-1;
2433 
2434   case Instruction::Mul: {
2435     // The output of the Mul can be at most twice the valid bits in the inputs.
2436     unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2437     if (SignBitsOp0 == 1) break;
2438     unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2439     if (SignBitsOp1 == 1) break;
2440     unsigned OutValidBits =
2441         (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2442     return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2443   }
2444 
2445   case Instruction::PHI: {
2446     const PHINode *PN = cast<PHINode>(U);
2447     unsigned NumIncomingValues = PN->getNumIncomingValues();
2448     // Don't analyze large in-degree PHIs.
2449     if (NumIncomingValues > 4) break;
2450     // Unreachable blocks may have zero-operand PHI nodes.
2451     if (NumIncomingValues == 0) break;
2452 
2453     // Take the minimum of all incoming values.  This can't infinitely loop
2454     // because of our depth threshold.
2455     Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2456     for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2457       if (Tmp == 1) return Tmp;
2458       Tmp = std::min(
2459           Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2460     }
2461     return Tmp;
2462   }
2463 
2464   case Instruction::Trunc:
2465     // FIXME: it's tricky to do anything useful for this, but it is an important
2466     // case for targets like X86.
2467     break;
2468 
2469   case Instruction::ExtractElement:
2470     // Look through extract element. At the moment we keep this simple and skip
2471     // tracking the specific element. But at least we might find information
2472     // valid for all elements of the vector (for example if vector is sign
2473     // extended, shifted, etc).
2474     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2475   }
2476 
2477   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2478   // use this information.
2479 
2480   // If we can examine all elements of a vector constant successfully, we're
2481   // done (we can't do any better than that). If not, keep trying.
2482   if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2483     return VecSignBits;
2484 
2485   KnownBits Known(TyBits);
2486   computeKnownBits(V, Known, Depth, Q);
2487 
2488   // If we know that the sign bit is either zero or one, determine the number of
2489   // identical bits in the top of the input value.
2490   return std::max(FirstAnswer, Known.countMinSignBits());
2491 }
2492 
2493 /// This function computes the integer multiple of Base that equals V.
2494 /// If successful, it returns true and returns the multiple in
2495 /// Multiple. If unsuccessful, it returns false. It looks
2496 /// through SExt instructions only if LookThroughSExt is true.
2497 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2498                            bool LookThroughSExt, unsigned Depth) {
2499   const unsigned MaxDepth = 6;
2500 
2501   assert(V && "No Value?");
2502   assert(Depth <= MaxDepth && "Limit Search Depth");
2503   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2504 
2505   Type *T = V->getType();
2506 
2507   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2508 
2509   if (Base == 0)
2510     return false;
2511 
2512   if (Base == 1) {
2513     Multiple = V;
2514     return true;
2515   }
2516 
2517   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2518   Constant *BaseVal = ConstantInt::get(T, Base);
2519   if (CO && CO == BaseVal) {
2520     // Multiple is 1.
2521     Multiple = ConstantInt::get(T, 1);
2522     return true;
2523   }
2524 
2525   if (CI && CI->getZExtValue() % Base == 0) {
2526     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2527     return true;
2528   }
2529 
2530   if (Depth == MaxDepth) return false;  // Limit search depth.
2531 
2532   Operator *I = dyn_cast<Operator>(V);
2533   if (!I) return false;
2534 
2535   switch (I->getOpcode()) {
2536   default: break;
2537   case Instruction::SExt:
2538     if (!LookThroughSExt) return false;
2539     // otherwise fall through to ZExt
2540     LLVM_FALLTHROUGH;
2541   case Instruction::ZExt:
2542     return ComputeMultiple(I->getOperand(0), Base, Multiple,
2543                            LookThroughSExt, Depth+1);
2544   case Instruction::Shl:
2545   case Instruction::Mul: {
2546     Value *Op0 = I->getOperand(0);
2547     Value *Op1 = I->getOperand(1);
2548 
2549     if (I->getOpcode() == Instruction::Shl) {
2550       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2551       if (!Op1CI) return false;
2552       // Turn Op0 << Op1 into Op0 * 2^Op1
2553       APInt Op1Int = Op1CI->getValue();
2554       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2555       APInt API(Op1Int.getBitWidth(), 0);
2556       API.setBit(BitToSet);
2557       Op1 = ConstantInt::get(V->getContext(), API);
2558     }
2559 
2560     Value *Mul0 = nullptr;
2561     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2562       if (Constant *Op1C = dyn_cast<Constant>(Op1))
2563         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2564           if (Op1C->getType()->getPrimitiveSizeInBits() <
2565               MulC->getType()->getPrimitiveSizeInBits())
2566             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2567           if (Op1C->getType()->getPrimitiveSizeInBits() >
2568               MulC->getType()->getPrimitiveSizeInBits())
2569             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2570 
2571           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2572           Multiple = ConstantExpr::getMul(MulC, Op1C);
2573           return true;
2574         }
2575 
2576       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2577         if (Mul0CI->getValue() == 1) {
2578           // V == Base * Op1, so return Op1
2579           Multiple = Op1;
2580           return true;
2581         }
2582     }
2583 
2584     Value *Mul1 = nullptr;
2585     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2586       if (Constant *Op0C = dyn_cast<Constant>(Op0))
2587         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2588           if (Op0C->getType()->getPrimitiveSizeInBits() <
2589               MulC->getType()->getPrimitiveSizeInBits())
2590             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2591           if (Op0C->getType()->getPrimitiveSizeInBits() >
2592               MulC->getType()->getPrimitiveSizeInBits())
2593             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2594 
2595           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2596           Multiple = ConstantExpr::getMul(MulC, Op0C);
2597           return true;
2598         }
2599 
2600       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2601         if (Mul1CI->getValue() == 1) {
2602           // V == Base * Op0, so return Op0
2603           Multiple = Op0;
2604           return true;
2605         }
2606     }
2607   }
2608   }
2609 
2610   // We could not determine if V is a multiple of Base.
2611   return false;
2612 }
2613 
2614 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2615                                             const TargetLibraryInfo *TLI) {
2616   const Function *F = ICS.getCalledFunction();
2617   if (!F)
2618     return Intrinsic::not_intrinsic;
2619 
2620   if (F->isIntrinsic())
2621     return F->getIntrinsicID();
2622 
2623   if (!TLI)
2624     return Intrinsic::not_intrinsic;
2625 
2626   LibFunc Func;
2627   // We're going to make assumptions on the semantics of the functions, check
2628   // that the target knows that it's available in this environment and it does
2629   // not have local linkage.
2630   if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2631     return Intrinsic::not_intrinsic;
2632 
2633   if (!ICS.onlyReadsMemory())
2634     return Intrinsic::not_intrinsic;
2635 
2636   // Otherwise check if we have a call to a function that can be turned into a
2637   // vector intrinsic.
2638   switch (Func) {
2639   default:
2640     break;
2641   case LibFunc_sin:
2642   case LibFunc_sinf:
2643   case LibFunc_sinl:
2644     return Intrinsic::sin;
2645   case LibFunc_cos:
2646   case LibFunc_cosf:
2647   case LibFunc_cosl:
2648     return Intrinsic::cos;
2649   case LibFunc_exp:
2650   case LibFunc_expf:
2651   case LibFunc_expl:
2652     return Intrinsic::exp;
2653   case LibFunc_exp2:
2654   case LibFunc_exp2f:
2655   case LibFunc_exp2l:
2656     return Intrinsic::exp2;
2657   case LibFunc_log:
2658   case LibFunc_logf:
2659   case LibFunc_logl:
2660     return Intrinsic::log;
2661   case LibFunc_log10:
2662   case LibFunc_log10f:
2663   case LibFunc_log10l:
2664     return Intrinsic::log10;
2665   case LibFunc_log2:
2666   case LibFunc_log2f:
2667   case LibFunc_log2l:
2668     return Intrinsic::log2;
2669   case LibFunc_fabs:
2670   case LibFunc_fabsf:
2671   case LibFunc_fabsl:
2672     return Intrinsic::fabs;
2673   case LibFunc_fmin:
2674   case LibFunc_fminf:
2675   case LibFunc_fminl:
2676     return Intrinsic::minnum;
2677   case LibFunc_fmax:
2678   case LibFunc_fmaxf:
2679   case LibFunc_fmaxl:
2680     return Intrinsic::maxnum;
2681   case LibFunc_copysign:
2682   case LibFunc_copysignf:
2683   case LibFunc_copysignl:
2684     return Intrinsic::copysign;
2685   case LibFunc_floor:
2686   case LibFunc_floorf:
2687   case LibFunc_floorl:
2688     return Intrinsic::floor;
2689   case LibFunc_ceil:
2690   case LibFunc_ceilf:
2691   case LibFunc_ceill:
2692     return Intrinsic::ceil;
2693   case LibFunc_trunc:
2694   case LibFunc_truncf:
2695   case LibFunc_truncl:
2696     return Intrinsic::trunc;
2697   case LibFunc_rint:
2698   case LibFunc_rintf:
2699   case LibFunc_rintl:
2700     return Intrinsic::rint;
2701   case LibFunc_nearbyint:
2702   case LibFunc_nearbyintf:
2703   case LibFunc_nearbyintl:
2704     return Intrinsic::nearbyint;
2705   case LibFunc_round:
2706   case LibFunc_roundf:
2707   case LibFunc_roundl:
2708     return Intrinsic::round;
2709   case LibFunc_pow:
2710   case LibFunc_powf:
2711   case LibFunc_powl:
2712     return Intrinsic::pow;
2713   case LibFunc_sqrt:
2714   case LibFunc_sqrtf:
2715   case LibFunc_sqrtl:
2716     return Intrinsic::sqrt;
2717   }
2718 
2719   return Intrinsic::not_intrinsic;
2720 }
2721 
2722 /// Return true if we can prove that the specified FP value is never equal to
2723 /// -0.0.
2724 ///
2725 /// NOTE: this function will need to be revisited when we support non-default
2726 /// rounding modes!
2727 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2728                                 unsigned Depth) {
2729   if (auto *CFP = dyn_cast<ConstantFP>(V))
2730     return !CFP->getValueAPF().isNegZero();
2731 
2732   // Limit search depth.
2733   if (Depth == MaxDepth)
2734     return false;
2735 
2736   auto *Op = dyn_cast<Operator>(V);
2737   if (!Op)
2738     return false;
2739 
2740   // Check if the nsz fast-math flag is set.
2741   if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2742     if (FPO->hasNoSignedZeros())
2743       return true;
2744 
2745   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2746   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2747     return true;
2748 
2749   // sitofp and uitofp turn into +0.0 for zero.
2750   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2751     return true;
2752 
2753   if (auto *Call = dyn_cast<CallInst>(Op)) {
2754     Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2755     switch (IID) {
2756     default:
2757       break;
2758     // sqrt(-0.0) = -0.0, no other negative results are possible.
2759     case Intrinsic::sqrt:
2760     case Intrinsic::canonicalize:
2761       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2762     // fabs(x) != -0.0
2763     case Intrinsic::fabs:
2764       return true;
2765     }
2766   }
2767 
2768   return false;
2769 }
2770 
2771 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2772 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2773 /// bit despite comparing equal.
2774 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2775                                             const TargetLibraryInfo *TLI,
2776                                             bool SignBitOnly,
2777                                             unsigned Depth) {
2778   // TODO: This function does not do the right thing when SignBitOnly is true
2779   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2780   // which flips the sign bits of NaNs.  See
2781   // https://llvm.org/bugs/show_bug.cgi?id=31702.
2782 
2783   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2784     return !CFP->getValueAPF().isNegative() ||
2785            (!SignBitOnly && CFP->getValueAPF().isZero());
2786   }
2787 
2788   // Handle vector of constants.
2789   if (auto *CV = dyn_cast<Constant>(V)) {
2790     if (CV->getType()->isVectorTy()) {
2791       unsigned NumElts = CV->getType()->getVectorNumElements();
2792       for (unsigned i = 0; i != NumElts; ++i) {
2793         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2794         if (!CFP)
2795           return false;
2796         if (CFP->getValueAPF().isNegative() &&
2797             (SignBitOnly || !CFP->getValueAPF().isZero()))
2798           return false;
2799       }
2800 
2801       // All non-negative ConstantFPs.
2802       return true;
2803     }
2804   }
2805 
2806   if (Depth == MaxDepth)
2807     return false; // Limit search depth.
2808 
2809   const Operator *I = dyn_cast<Operator>(V);
2810   if (!I)
2811     return false;
2812 
2813   switch (I->getOpcode()) {
2814   default:
2815     break;
2816   // Unsigned integers are always nonnegative.
2817   case Instruction::UIToFP:
2818     return true;
2819   case Instruction::FMul:
2820     // x*x is always non-negative or a NaN.
2821     if (I->getOperand(0) == I->getOperand(1) &&
2822         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2823       return true;
2824 
2825     LLVM_FALLTHROUGH;
2826   case Instruction::FAdd:
2827   case Instruction::FDiv:
2828   case Instruction::FRem:
2829     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2830                                            Depth + 1) &&
2831            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2832                                            Depth + 1);
2833   case Instruction::Select:
2834     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2835                                            Depth + 1) &&
2836            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2837                                            Depth + 1);
2838   case Instruction::FPExt:
2839   case Instruction::FPTrunc:
2840     // Widening/narrowing never change sign.
2841     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2842                                            Depth + 1);
2843   case Instruction::ExtractElement:
2844     // Look through extract element. At the moment we keep this simple and skip
2845     // tracking the specific element. But at least we might find information
2846     // valid for all elements of the vector.
2847     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2848                                            Depth + 1);
2849   case Instruction::Call:
2850     const auto *CI = cast<CallInst>(I);
2851     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2852     switch (IID) {
2853     default:
2854       break;
2855     case Intrinsic::maxnum:
2856       return (isKnownNeverNaN(I->getOperand(0), TLI) &&
2857               cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
2858                                               SignBitOnly, Depth + 1)) ||
2859             (isKnownNeverNaN(I->getOperand(1), TLI) &&
2860               cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
2861                                               SignBitOnly, Depth + 1));
2862 
2863     case Intrinsic::minnum:
2864       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2865                                              Depth + 1) &&
2866              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2867                                              Depth + 1);
2868     case Intrinsic::exp:
2869     case Intrinsic::exp2:
2870     case Intrinsic::fabs:
2871       return true;
2872 
2873     case Intrinsic::sqrt:
2874       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
2875       if (!SignBitOnly)
2876         return true;
2877       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2878                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
2879 
2880     case Intrinsic::powi:
2881       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2882         // powi(x,n) is non-negative if n is even.
2883         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2884           return true;
2885       }
2886       // TODO: This is not correct.  Given that exp is an integer, here are the
2887       // ways that pow can return a negative value:
2888       //
2889       //   pow(x, exp)    --> negative if exp is odd and x is negative.
2890       //   pow(-0, exp)   --> -inf if exp is negative odd.
2891       //   pow(-0, exp)   --> -0 if exp is positive odd.
2892       //   pow(-inf, exp) --> -0 if exp is negative odd.
2893       //   pow(-inf, exp) --> -inf if exp is positive odd.
2894       //
2895       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2896       // but we must return false if x == -0.  Unfortunately we do not currently
2897       // have a way of expressing this constraint.  See details in
2898       // https://llvm.org/bugs/show_bug.cgi?id=31702.
2899       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2900                                              Depth + 1);
2901 
2902     case Intrinsic::fma:
2903     case Intrinsic::fmuladd:
2904       // x*x+y is non-negative if y is non-negative.
2905       return I->getOperand(0) == I->getOperand(1) &&
2906              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2907              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2908                                              Depth + 1);
2909     }
2910     break;
2911   }
2912   return false;
2913 }
2914 
2915 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2916                                        const TargetLibraryInfo *TLI) {
2917   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2918 }
2919 
2920 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2921   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2922 }
2923 
2924 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
2925                            unsigned Depth) {
2926   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2927 
2928   // If we're told that NaNs won't happen, assume they won't.
2929   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2930     if (FPMathOp->hasNoNaNs())
2931       return true;
2932 
2933   // Handle scalar constants.
2934   if (auto *CFP = dyn_cast<ConstantFP>(V))
2935     return !CFP->isNaN();
2936 
2937   if (Depth == MaxDepth)
2938     return false;
2939 
2940   if (auto *Inst = dyn_cast<Instruction>(V)) {
2941     switch (Inst->getOpcode()) {
2942     case Instruction::FAdd:
2943     case Instruction::FMul:
2944     case Instruction::FSub:
2945     case Instruction::FDiv:
2946     case Instruction::FRem: {
2947       // TODO: Need isKnownNeverInfinity
2948       return false;
2949     }
2950     case Instruction::Select: {
2951       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
2952              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
2953     }
2954     case Instruction::SIToFP:
2955     case Instruction::UIToFP:
2956       return true;
2957     case Instruction::FPTrunc:
2958     case Instruction::FPExt:
2959       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
2960     default:
2961       break;
2962     }
2963   }
2964 
2965   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
2966     switch (II->getIntrinsicID()) {
2967     case Intrinsic::canonicalize:
2968     case Intrinsic::fabs:
2969     case Intrinsic::copysign:
2970     case Intrinsic::exp:
2971     case Intrinsic::exp2:
2972     case Intrinsic::floor:
2973     case Intrinsic::ceil:
2974     case Intrinsic::trunc:
2975     case Intrinsic::rint:
2976     case Intrinsic::nearbyint:
2977     case Intrinsic::round:
2978       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
2979     case Intrinsic::sqrt:
2980       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
2981              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
2982     default:
2983       return false;
2984     }
2985   }
2986 
2987   // Bail out for constant expressions, but try to handle vector constants.
2988   if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2989     return false;
2990 
2991   // For vectors, verify that each element is not NaN.
2992   unsigned NumElts = V->getType()->getVectorNumElements();
2993   for (unsigned i = 0; i != NumElts; ++i) {
2994     Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2995     if (!Elt)
2996       return false;
2997     if (isa<UndefValue>(Elt))
2998       continue;
2999     auto *CElt = dyn_cast<ConstantFP>(Elt);
3000     if (!CElt || CElt->isNaN())
3001       return false;
3002   }
3003   // All elements were confirmed not-NaN or undefined.
3004   return true;
3005 }
3006 
3007 /// If the specified value can be set by repeating the same byte in memory,
3008 /// return the i8 value that it is represented with.  This is
3009 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
3010 /// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
3011 /// byte store (e.g. i16 0x1234), return null.
3012 Value *llvm::isBytewiseValue(Value *V) {
3013   // All byte-wide stores are splatable, even of arbitrary variables.
3014   if (V->getType()->isIntegerTy(8)) return V;
3015 
3016   // Handle 'null' ConstantArrayZero etc.
3017   if (Constant *C = dyn_cast<Constant>(V))
3018     if (C->isNullValue())
3019       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
3020 
3021   // Constant float and double values can be handled as integer values if the
3022   // corresponding integer value is "byteable".  An important case is 0.0.
3023   if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3024     if (CFP->getType()->isFloatTy())
3025       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
3026     if (CFP->getType()->isDoubleTy())
3027       V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
3028     // Don't handle long double formats, which have strange constraints.
3029   }
3030 
3031   // We can handle constant integers that are multiple of 8 bits.
3032   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
3033     if (CI->getBitWidth() % 8 == 0) {
3034       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3035 
3036       if (!CI->getValue().isSplat(8))
3037         return nullptr;
3038       return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
3039     }
3040   }
3041 
3042   // A ConstantDataArray/Vector is splatable if all its members are equal and
3043   // also splatable.
3044   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
3045     Value *Elt = CA->getElementAsConstant(0);
3046     Value *Val = isBytewiseValue(Elt);
3047     if (!Val)
3048       return nullptr;
3049 
3050     for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
3051       if (CA->getElementAsConstant(I) != Elt)
3052         return nullptr;
3053 
3054     return Val;
3055   }
3056 
3057   // Conceptually, we could handle things like:
3058   //   %a = zext i8 %X to i16
3059   //   %b = shl i16 %a, 8
3060   //   %c = or i16 %a, %b
3061   // but until there is an example that actually needs this, it doesn't seem
3062   // worth worrying about.
3063   return nullptr;
3064 }
3065 
3066 // This is the recursive version of BuildSubAggregate. It takes a few different
3067 // arguments. Idxs is the index within the nested struct From that we are
3068 // looking at now (which is of type IndexedType). IdxSkip is the number of
3069 // indices from Idxs that should be left out when inserting into the resulting
3070 // struct. To is the result struct built so far, new insertvalue instructions
3071 // build on that.
3072 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3073                                 SmallVectorImpl<unsigned> &Idxs,
3074                                 unsigned IdxSkip,
3075                                 Instruction *InsertBefore) {
3076   StructType *STy = dyn_cast<StructType>(IndexedType);
3077   if (STy) {
3078     // Save the original To argument so we can modify it
3079     Value *OrigTo = To;
3080     // General case, the type indexed by Idxs is a struct
3081     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3082       // Process each struct element recursively
3083       Idxs.push_back(i);
3084       Value *PrevTo = To;
3085       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3086                              InsertBefore);
3087       Idxs.pop_back();
3088       if (!To) {
3089         // Couldn't find any inserted value for this index? Cleanup
3090         while (PrevTo != OrigTo) {
3091           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3092           PrevTo = Del->getAggregateOperand();
3093           Del->eraseFromParent();
3094         }
3095         // Stop processing elements
3096         break;
3097       }
3098     }
3099     // If we successfully found a value for each of our subaggregates
3100     if (To)
3101       return To;
3102   }
3103   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3104   // the struct's elements had a value that was inserted directly. In the latter
3105   // case, perhaps we can't determine each of the subelements individually, but
3106   // we might be able to find the complete struct somewhere.
3107 
3108   // Find the value that is at that particular spot
3109   Value *V = FindInsertedValue(From, Idxs);
3110 
3111   if (!V)
3112     return nullptr;
3113 
3114   // Insert the value in the new (sub) aggregate
3115   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3116                                  "tmp", InsertBefore);
3117 }
3118 
3119 // This helper takes a nested struct and extracts a part of it (which is again a
3120 // struct) into a new value. For example, given the struct:
3121 // { a, { b, { c, d }, e } }
3122 // and the indices "1, 1" this returns
3123 // { c, d }.
3124 //
3125 // It does this by inserting an insertvalue for each element in the resulting
3126 // struct, as opposed to just inserting a single struct. This will only work if
3127 // each of the elements of the substruct are known (ie, inserted into From by an
3128 // insertvalue instruction somewhere).
3129 //
3130 // All inserted insertvalue instructions are inserted before InsertBefore
3131 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3132                                 Instruction *InsertBefore) {
3133   assert(InsertBefore && "Must have someplace to insert!");
3134   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3135                                                              idx_range);
3136   Value *To = UndefValue::get(IndexedType);
3137   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3138   unsigned IdxSkip = Idxs.size();
3139 
3140   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3141 }
3142 
3143 /// Given an aggregate and a sequence of indices, see if the scalar value
3144 /// indexed is already around as a register, for example if it was inserted
3145 /// directly into the aggregate.
3146 ///
3147 /// If InsertBefore is not null, this function will duplicate (modified)
3148 /// insertvalues when a part of a nested struct is extracted.
3149 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3150                                Instruction *InsertBefore) {
3151   // Nothing to index? Just return V then (this is useful at the end of our
3152   // recursion).
3153   if (idx_range.empty())
3154     return V;
3155   // We have indices, so V should have an indexable type.
3156   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3157          "Not looking at a struct or array?");
3158   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3159          "Invalid indices for type?");
3160 
3161   if (Constant *C = dyn_cast<Constant>(V)) {
3162     C = C->getAggregateElement(idx_range[0]);
3163     if (!C) return nullptr;
3164     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3165   }
3166 
3167   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3168     // Loop the indices for the insertvalue instruction in parallel with the
3169     // requested indices
3170     const unsigned *req_idx = idx_range.begin();
3171     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3172          i != e; ++i, ++req_idx) {
3173       if (req_idx == idx_range.end()) {
3174         // We can't handle this without inserting insertvalues
3175         if (!InsertBefore)
3176           return nullptr;
3177 
3178         // The requested index identifies a part of a nested aggregate. Handle
3179         // this specially. For example,
3180         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3181         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3182         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3183         // This can be changed into
3184         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3185         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3186         // which allows the unused 0,0 element from the nested struct to be
3187         // removed.
3188         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3189                                  InsertBefore);
3190       }
3191 
3192       // This insert value inserts something else than what we are looking for.
3193       // See if the (aggregate) value inserted into has the value we are
3194       // looking for, then.
3195       if (*req_idx != *i)
3196         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3197                                  InsertBefore);
3198     }
3199     // If we end up here, the indices of the insertvalue match with those
3200     // requested (though possibly only partially). Now we recursively look at
3201     // the inserted value, passing any remaining indices.
3202     return FindInsertedValue(I->getInsertedValueOperand(),
3203                              makeArrayRef(req_idx, idx_range.end()),
3204                              InsertBefore);
3205   }
3206 
3207   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3208     // If we're extracting a value from an aggregate that was extracted from
3209     // something else, we can extract from that something else directly instead.
3210     // However, we will need to chain I's indices with the requested indices.
3211 
3212     // Calculate the number of indices required
3213     unsigned size = I->getNumIndices() + idx_range.size();
3214     // Allocate some space to put the new indices in
3215     SmallVector<unsigned, 5> Idxs;
3216     Idxs.reserve(size);
3217     // Add indices from the extract value instruction
3218     Idxs.append(I->idx_begin(), I->idx_end());
3219 
3220     // Add requested indices
3221     Idxs.append(idx_range.begin(), idx_range.end());
3222 
3223     assert(Idxs.size() == size
3224            && "Number of indices added not correct?");
3225 
3226     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3227   }
3228   // Otherwise, we don't know (such as, extracting from a function return value
3229   // or load instruction)
3230   return nullptr;
3231 }
3232 
3233 /// Analyze the specified pointer to see if it can be expressed as a base
3234 /// pointer plus a constant offset. Return the base and offset to the caller.
3235 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3236                                               const DataLayout &DL) {
3237   unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3238   APInt ByteOffset(BitWidth, 0);
3239 
3240   // We walk up the defs but use a visited set to handle unreachable code. In
3241   // that case, we stop after accumulating the cycle once (not that it
3242   // matters).
3243   SmallPtrSet<Value *, 16> Visited;
3244   while (Visited.insert(Ptr).second) {
3245     if (Ptr->getType()->isVectorTy())
3246       break;
3247 
3248     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3249       // If one of the values we have visited is an addrspacecast, then
3250       // the pointer type of this GEP may be different from the type
3251       // of the Ptr parameter which was passed to this function.  This
3252       // means when we construct GEPOffset, we need to use the size
3253       // of GEP's pointer type rather than the size of the original
3254       // pointer type.
3255       APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3256       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3257         break;
3258 
3259       ByteOffset += GEPOffset.getSExtValue();
3260 
3261       Ptr = GEP->getPointerOperand();
3262     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3263                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3264       Ptr = cast<Operator>(Ptr)->getOperand(0);
3265     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3266       if (GA->isInterposable())
3267         break;
3268       Ptr = GA->getAliasee();
3269     } else {
3270       break;
3271     }
3272   }
3273   Offset = ByteOffset.getSExtValue();
3274   return Ptr;
3275 }
3276 
3277 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3278                                        unsigned CharSize) {
3279   // Make sure the GEP has exactly three arguments.
3280   if (GEP->getNumOperands() != 3)
3281     return false;
3282 
3283   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3284   // CharSize.
3285   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3286   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3287     return false;
3288 
3289   // Check to make sure that the first operand of the GEP is an integer and
3290   // has value 0 so that we are sure we're indexing into the initializer.
3291   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3292   if (!FirstIdx || !FirstIdx->isZero())
3293     return false;
3294 
3295   return true;
3296 }
3297 
3298 bool llvm::getConstantDataArrayInfo(const Value *V,
3299                                     ConstantDataArraySlice &Slice,
3300                                     unsigned ElementSize, uint64_t Offset) {
3301   assert(V);
3302 
3303   // Look through bitcast instructions and geps.
3304   V = V->stripPointerCasts();
3305 
3306   // If the value is a GEP instruction or constant expression, treat it as an
3307   // offset.
3308   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3309     // The GEP operator should be based on a pointer to string constant, and is
3310     // indexing into the string constant.
3311     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3312       return false;
3313 
3314     // If the second index isn't a ConstantInt, then this is a variable index
3315     // into the array.  If this occurs, we can't say anything meaningful about
3316     // the string.
3317     uint64_t StartIdx = 0;
3318     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3319       StartIdx = CI->getZExtValue();
3320     else
3321       return false;
3322     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3323                                     StartIdx + Offset);
3324   }
3325 
3326   // The GEP instruction, constant or instruction, must reference a global
3327   // variable that is a constant and is initialized. The referenced constant
3328   // initializer is the array that we'll use for optimization.
3329   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3330   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3331     return false;
3332 
3333   const ConstantDataArray *Array;
3334   ArrayType *ArrayTy;
3335   if (GV->getInitializer()->isNullValue()) {
3336     Type *GVTy = GV->getValueType();
3337     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3338       // A zeroinitializer for the array; there is no ConstantDataArray.
3339       Array = nullptr;
3340     } else {
3341       const DataLayout &DL = GV->getParent()->getDataLayout();
3342       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3343       uint64_t Length = SizeInBytes / (ElementSize / 8);
3344       if (Length <= Offset)
3345         return false;
3346 
3347       Slice.Array = nullptr;
3348       Slice.Offset = 0;
3349       Slice.Length = Length - Offset;
3350       return true;
3351     }
3352   } else {
3353     // This must be a ConstantDataArray.
3354     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3355     if (!Array)
3356       return false;
3357     ArrayTy = Array->getType();
3358   }
3359   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3360     return false;
3361 
3362   uint64_t NumElts = ArrayTy->getArrayNumElements();
3363   if (Offset > NumElts)
3364     return false;
3365 
3366   Slice.Array = Array;
3367   Slice.Offset = Offset;
3368   Slice.Length = NumElts - Offset;
3369   return true;
3370 }
3371 
3372 /// This function computes the length of a null-terminated C string pointed to
3373 /// by V. If successful, it returns true and returns the string in Str.
3374 /// If unsuccessful, it returns false.
3375 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3376                                  uint64_t Offset, bool TrimAtNul) {
3377   ConstantDataArraySlice Slice;
3378   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3379     return false;
3380 
3381   if (Slice.Array == nullptr) {
3382     if (TrimAtNul) {
3383       Str = StringRef();
3384       return true;
3385     }
3386     if (Slice.Length == 1) {
3387       Str = StringRef("", 1);
3388       return true;
3389     }
3390     // We cannot instantiate a StringRef as we do not have an appropriate string
3391     // of 0s at hand.
3392     return false;
3393   }
3394 
3395   // Start out with the entire array in the StringRef.
3396   Str = Slice.Array->getAsString();
3397   // Skip over 'offset' bytes.
3398   Str = Str.substr(Slice.Offset);
3399 
3400   if (TrimAtNul) {
3401     // Trim off the \0 and anything after it.  If the array is not nul
3402     // terminated, we just return the whole end of string.  The client may know
3403     // some other way that the string is length-bound.
3404     Str = Str.substr(0, Str.find('\0'));
3405   }
3406   return true;
3407 }
3408 
3409 // These next two are very similar to the above, but also look through PHI
3410 // nodes.
3411 // TODO: See if we can integrate these two together.
3412 
3413 /// If we can compute the length of the string pointed to by
3414 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3415 static uint64_t GetStringLengthH(const Value *V,
3416                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3417                                  unsigned CharSize) {
3418   // Look through noop bitcast instructions.
3419   V = V->stripPointerCasts();
3420 
3421   // If this is a PHI node, there are two cases: either we have already seen it
3422   // or we haven't.
3423   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3424     if (!PHIs.insert(PN).second)
3425       return ~0ULL;  // already in the set.
3426 
3427     // If it was new, see if all the input strings are the same length.
3428     uint64_t LenSoFar = ~0ULL;
3429     for (Value *IncValue : PN->incoming_values()) {
3430       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3431       if (Len == 0) return 0; // Unknown length -> unknown.
3432 
3433       if (Len == ~0ULL) continue;
3434 
3435       if (Len != LenSoFar && LenSoFar != ~0ULL)
3436         return 0;    // Disagree -> unknown.
3437       LenSoFar = Len;
3438     }
3439 
3440     // Success, all agree.
3441     return LenSoFar;
3442   }
3443 
3444   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3445   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3446     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3447     if (Len1 == 0) return 0;
3448     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3449     if (Len2 == 0) return 0;
3450     if (Len1 == ~0ULL) return Len2;
3451     if (Len2 == ~0ULL) return Len1;
3452     if (Len1 != Len2) return 0;
3453     return Len1;
3454   }
3455 
3456   // Otherwise, see if we can read the string.
3457   ConstantDataArraySlice Slice;
3458   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3459     return 0;
3460 
3461   if (Slice.Array == nullptr)
3462     return 1;
3463 
3464   // Search for nul characters
3465   unsigned NullIndex = 0;
3466   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3467     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3468       break;
3469   }
3470 
3471   return NullIndex + 1;
3472 }
3473 
3474 /// If we can compute the length of the string pointed to by
3475 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3476 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3477   if (!V->getType()->isPointerTy())
3478     return 0;
3479 
3480   SmallPtrSet<const PHINode*, 32> PHIs;
3481   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3482   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3483   // an empty string as a length.
3484   return Len == ~0ULL ? 1 : Len;
3485 }
3486 
3487 const Value *llvm::getArgumentAliasingToReturnedPointer(ImmutableCallSite CS) {
3488   assert(CS &&
3489          "getArgumentAliasingToReturnedPointer only works on nonnull CallSite");
3490   if (const Value *RV = CS.getReturnedArgOperand())
3491     return RV;
3492   // This can be used only as a aliasing property.
3493   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS))
3494     return CS.getArgOperand(0);
3495   return nullptr;
3496 }
3497 
3498 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3499     ImmutableCallSite CS) {
3500   return CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
3501          CS.getIntrinsicID() == Intrinsic::strip_invariant_group;
3502 }
3503 
3504 /// \p PN defines a loop-variant pointer to an object.  Check if the
3505 /// previous iteration of the loop was referring to the same object as \p PN.
3506 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3507                                          const LoopInfo *LI) {
3508   // Find the loop-defined value.
3509   Loop *L = LI->getLoopFor(PN->getParent());
3510   if (PN->getNumIncomingValues() != 2)
3511     return true;
3512 
3513   // Find the value from previous iteration.
3514   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3515   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3516     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3517   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3518     return true;
3519 
3520   // If a new pointer is loaded in the loop, the pointer references a different
3521   // object in every iteration.  E.g.:
3522   //    for (i)
3523   //       int *p = a[i];
3524   //       ...
3525   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3526     if (!L->isLoopInvariant(Load->getPointerOperand()))
3527       return false;
3528   return true;
3529 }
3530 
3531 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3532                                  unsigned MaxLookup) {
3533   if (!V->getType()->isPointerTy())
3534     return V;
3535   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3536     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3537       V = GEP->getPointerOperand();
3538     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3539                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3540       V = cast<Operator>(V)->getOperand(0);
3541     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3542       if (GA->isInterposable())
3543         return V;
3544       V = GA->getAliasee();
3545     } else if (isa<AllocaInst>(V)) {
3546       // An alloca can't be further simplified.
3547       return V;
3548     } else {
3549       if (auto CS = CallSite(V)) {
3550         // CaptureTracking can know about special capturing properties of some
3551         // intrinsics like launder.invariant.group, that can't be expressed with
3552         // the attributes, but have properties like returning aliasing pointer.
3553         // Because some analysis may assume that nocaptured pointer is not
3554         // returned from some special intrinsic (because function would have to
3555         // be marked with returns attribute), it is crucial to use this function
3556         // because it should be in sync with CaptureTracking. Not using it may
3557         // cause weird miscompilations where 2 aliasing pointers are assumed to
3558         // noalias.
3559         if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
3560           V = RP;
3561           continue;
3562         }
3563       }
3564 
3565       // See if InstructionSimplify knows any relevant tricks.
3566       if (Instruction *I = dyn_cast<Instruction>(V))
3567         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3568         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3569           V = Simplified;
3570           continue;
3571         }
3572 
3573       return V;
3574     }
3575     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3576   }
3577   return V;
3578 }
3579 
3580 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3581                                 const DataLayout &DL, LoopInfo *LI,
3582                                 unsigned MaxLookup) {
3583   SmallPtrSet<Value *, 4> Visited;
3584   SmallVector<Value *, 4> Worklist;
3585   Worklist.push_back(V);
3586   do {
3587     Value *P = Worklist.pop_back_val();
3588     P = GetUnderlyingObject(P, DL, MaxLookup);
3589 
3590     if (!Visited.insert(P).second)
3591       continue;
3592 
3593     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3594       Worklist.push_back(SI->getTrueValue());
3595       Worklist.push_back(SI->getFalseValue());
3596       continue;
3597     }
3598 
3599     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3600       // If this PHI changes the underlying object in every iteration of the
3601       // loop, don't look through it.  Consider:
3602       //   int **A;
3603       //   for (i) {
3604       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3605       //     Curr = A[i];
3606       //     *Prev, *Curr;
3607       //
3608       // Prev is tracking Curr one iteration behind so they refer to different
3609       // underlying objects.
3610       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3611           isSameUnderlyingObjectInLoop(PN, LI))
3612         for (Value *IncValue : PN->incoming_values())
3613           Worklist.push_back(IncValue);
3614       continue;
3615     }
3616 
3617     Objects.push_back(P);
3618   } while (!Worklist.empty());
3619 }
3620 
3621 /// This is the function that does the work of looking through basic
3622 /// ptrtoint+arithmetic+inttoptr sequences.
3623 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3624   do {
3625     if (const Operator *U = dyn_cast<Operator>(V)) {
3626       // If we find a ptrtoint, we can transfer control back to the
3627       // regular getUnderlyingObjectFromInt.
3628       if (U->getOpcode() == Instruction::PtrToInt)
3629         return U->getOperand(0);
3630       // If we find an add of a constant, a multiplied value, or a phi, it's
3631       // likely that the other operand will lead us to the base
3632       // object. We don't have to worry about the case where the
3633       // object address is somehow being computed by the multiply,
3634       // because our callers only care when the result is an
3635       // identifiable object.
3636       if (U->getOpcode() != Instruction::Add ||
3637           (!isa<ConstantInt>(U->getOperand(1)) &&
3638            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3639            !isa<PHINode>(U->getOperand(1))))
3640         return V;
3641       V = U->getOperand(0);
3642     } else {
3643       return V;
3644     }
3645     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3646   } while (true);
3647 }
3648 
3649 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3650 /// ptrtoint+arithmetic+inttoptr sequences.
3651 /// It returns false if unidentified object is found in GetUnderlyingObjects.
3652 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3653                           SmallVectorImpl<Value *> &Objects,
3654                           const DataLayout &DL) {
3655   SmallPtrSet<const Value *, 16> Visited;
3656   SmallVector<const Value *, 4> Working(1, V);
3657   do {
3658     V = Working.pop_back_val();
3659 
3660     SmallVector<Value *, 4> Objs;
3661     GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3662 
3663     for (Value *V : Objs) {
3664       if (!Visited.insert(V).second)
3665         continue;
3666       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3667         const Value *O =
3668           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3669         if (O->getType()->isPointerTy()) {
3670           Working.push_back(O);
3671           continue;
3672         }
3673       }
3674       // If GetUnderlyingObjects fails to find an identifiable object,
3675       // getUnderlyingObjectsForCodeGen also fails for safety.
3676       if (!isIdentifiedObject(V)) {
3677         Objects.clear();
3678         return false;
3679       }
3680       Objects.push_back(const_cast<Value *>(V));
3681     }
3682   } while (!Working.empty());
3683   return true;
3684 }
3685 
3686 /// Return true if the only users of this pointer are lifetime markers.
3687 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3688   for (const User *U : V->users()) {
3689     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3690     if (!II) return false;
3691 
3692     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3693         II->getIntrinsicID() != Intrinsic::lifetime_end)
3694       return false;
3695   }
3696   return true;
3697 }
3698 
3699 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3700                                         const Instruction *CtxI,
3701                                         const DominatorTree *DT) {
3702   const Operator *Inst = dyn_cast<Operator>(V);
3703   if (!Inst)
3704     return false;
3705 
3706   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3707     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3708       if (C->canTrap())
3709         return false;
3710 
3711   switch (Inst->getOpcode()) {
3712   default:
3713     return true;
3714   case Instruction::UDiv:
3715   case Instruction::URem: {
3716     // x / y is undefined if y == 0.
3717     const APInt *V;
3718     if (match(Inst->getOperand(1), m_APInt(V)))
3719       return *V != 0;
3720     return false;
3721   }
3722   case Instruction::SDiv:
3723   case Instruction::SRem: {
3724     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3725     const APInt *Numerator, *Denominator;
3726     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3727       return false;
3728     // We cannot hoist this division if the denominator is 0.
3729     if (*Denominator == 0)
3730       return false;
3731     // It's safe to hoist if the denominator is not 0 or -1.
3732     if (*Denominator != -1)
3733       return true;
3734     // At this point we know that the denominator is -1.  It is safe to hoist as
3735     // long we know that the numerator is not INT_MIN.
3736     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3737       return !Numerator->isMinSignedValue();
3738     // The numerator *might* be MinSignedValue.
3739     return false;
3740   }
3741   case Instruction::Load: {
3742     const LoadInst *LI = cast<LoadInst>(Inst);
3743     if (!LI->isUnordered() ||
3744         // Speculative load may create a race that did not exist in the source.
3745         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3746         // Speculative load may load data from dirty regions.
3747         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3748         LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3749       return false;
3750     const DataLayout &DL = LI->getModule()->getDataLayout();
3751     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3752                                               LI->getAlignment(), DL, CtxI, DT);
3753   }
3754   case Instruction::Call: {
3755     auto *CI = cast<const CallInst>(Inst);
3756     const Function *Callee = CI->getCalledFunction();
3757 
3758     // The called function could have undefined behavior or side-effects, even
3759     // if marked readnone nounwind.
3760     return Callee && Callee->isSpeculatable();
3761   }
3762   case Instruction::VAArg:
3763   case Instruction::Alloca:
3764   case Instruction::Invoke:
3765   case Instruction::PHI:
3766   case Instruction::Store:
3767   case Instruction::Ret:
3768   case Instruction::Br:
3769   case Instruction::IndirectBr:
3770   case Instruction::Switch:
3771   case Instruction::Unreachable:
3772   case Instruction::Fence:
3773   case Instruction::AtomicRMW:
3774   case Instruction::AtomicCmpXchg:
3775   case Instruction::LandingPad:
3776   case Instruction::Resume:
3777   case Instruction::CatchSwitch:
3778   case Instruction::CatchPad:
3779   case Instruction::CatchRet:
3780   case Instruction::CleanupPad:
3781   case Instruction::CleanupRet:
3782     return false; // Misc instructions which have effects
3783   }
3784 }
3785 
3786 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3787   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3788 }
3789 
3790 OverflowResult llvm::computeOverflowForUnsignedMul(
3791     const Value *LHS, const Value *RHS, const DataLayout &DL,
3792     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3793     bool UseInstrInfo) {
3794   // Multiplying n * m significant bits yields a result of n + m significant
3795   // bits. If the total number of significant bits does not exceed the
3796   // result bit width (minus 1), there is no overflow.
3797   // This means if we have enough leading zero bits in the operands
3798   // we can guarantee that the result does not overflow.
3799   // Ref: "Hacker's Delight" by Henry Warren
3800   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3801   KnownBits LHSKnown(BitWidth);
3802   KnownBits RHSKnown(BitWidth);
3803   computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
3804                    UseInstrInfo);
3805   computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
3806                    UseInstrInfo);
3807   // Note that underestimating the number of zero bits gives a more
3808   // conservative answer.
3809   unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3810                       RHSKnown.countMinLeadingZeros();
3811   // First handle the easy case: if we have enough zero bits there's
3812   // definitely no overflow.
3813   if (ZeroBits >= BitWidth)
3814     return OverflowResult::NeverOverflows;
3815 
3816   // Get the largest possible values for each operand.
3817   APInt LHSMax = ~LHSKnown.Zero;
3818   APInt RHSMax = ~RHSKnown.Zero;
3819 
3820   // We know the multiply operation doesn't overflow if the maximum values for
3821   // each operand will not overflow after we multiply them together.
3822   bool MaxOverflow;
3823   (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3824   if (!MaxOverflow)
3825     return OverflowResult::NeverOverflows;
3826 
3827   // We know it always overflows if multiplying the smallest possible values for
3828   // the operands also results in overflow.
3829   bool MinOverflow;
3830   (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3831   if (MinOverflow)
3832     return OverflowResult::AlwaysOverflows;
3833 
3834   return OverflowResult::MayOverflow;
3835 }
3836 
3837 OverflowResult
3838 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
3839                                   const DataLayout &DL, AssumptionCache *AC,
3840                                   const Instruction *CxtI,
3841                                   const DominatorTree *DT, bool UseInstrInfo) {
3842   // Multiplying n * m significant bits yields a result of n + m significant
3843   // bits. If the total number of significant bits does not exceed the
3844   // result bit width (minus 1), there is no overflow.
3845   // This means if we have enough leading sign bits in the operands
3846   // we can guarantee that the result does not overflow.
3847   // Ref: "Hacker's Delight" by Henry Warren
3848   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3849 
3850   // Note that underestimating the number of sign bits gives a more
3851   // conservative answer.
3852   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
3853                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
3854 
3855   // First handle the easy case: if we have enough sign bits there's
3856   // definitely no overflow.
3857   if (SignBits > BitWidth + 1)
3858     return OverflowResult::NeverOverflows;
3859 
3860   // There are two ambiguous cases where there can be no overflow:
3861   //   SignBits == BitWidth + 1    and
3862   //   SignBits == BitWidth
3863   // The second case is difficult to check, therefore we only handle the
3864   // first case.
3865   if (SignBits == BitWidth + 1) {
3866     // It overflows only when both arguments are negative and the true
3867     // product is exactly the minimum negative number.
3868     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
3869     // For simplicity we just check if at least one side is not negative.
3870     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
3871                                           nullptr, UseInstrInfo);
3872     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
3873                                           nullptr, UseInstrInfo);
3874     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
3875       return OverflowResult::NeverOverflows;
3876   }
3877   return OverflowResult::MayOverflow;
3878 }
3879 
3880 OverflowResult llvm::computeOverflowForUnsignedAdd(
3881     const Value *LHS, const Value *RHS, const DataLayout &DL,
3882     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3883     bool UseInstrInfo) {
3884   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
3885                                         nullptr, UseInstrInfo);
3886   if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3887     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
3888                                           nullptr, UseInstrInfo);
3889 
3890     if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3891       // The sign bit is set in both cases: this MUST overflow.
3892       // Create a simple add instruction, and insert it into the struct.
3893       return OverflowResult::AlwaysOverflows;
3894     }
3895 
3896     if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3897       // The sign bit is clear in both cases: this CANNOT overflow.
3898       // Create a simple add instruction, and insert it into the struct.
3899       return OverflowResult::NeverOverflows;
3900     }
3901   }
3902 
3903   return OverflowResult::MayOverflow;
3904 }
3905 
3906 /// Return true if we can prove that adding the two values of the
3907 /// knownbits will not overflow.
3908 /// Otherwise return false.
3909 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3910                                     const KnownBits &RHSKnown) {
3911   // Addition of two 2's complement numbers having opposite signs will never
3912   // overflow.
3913   if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3914       (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3915     return true;
3916 
3917   // If either of the values is known to be non-negative, adding them can only
3918   // overflow if the second is also non-negative, so we can assume that.
3919   // Two non-negative numbers will only overflow if there is a carry to the
3920   // sign bit, so we can check if even when the values are as big as possible
3921   // there is no overflow to the sign bit.
3922   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3923     APInt MaxLHS = ~LHSKnown.Zero;
3924     MaxLHS.clearSignBit();
3925     APInt MaxRHS = ~RHSKnown.Zero;
3926     MaxRHS.clearSignBit();
3927     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3928     return Result.isSignBitClear();
3929   }
3930 
3931   // If either of the values is known to be negative, adding them can only
3932   // overflow if the second is also negative, so we can assume that.
3933   // Two negative number will only overflow if there is no carry to the sign
3934   // bit, so we can check if even when the values are as small as possible
3935   // there is overflow to the sign bit.
3936   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3937     APInt MinLHS = LHSKnown.One;
3938     MinLHS.clearSignBit();
3939     APInt MinRHS = RHSKnown.One;
3940     MinRHS.clearSignBit();
3941     APInt Result = std::move(MinLHS) + std::move(MinRHS);
3942     return Result.isSignBitSet();
3943   }
3944 
3945   // If we reached here it means that we know nothing about the sign bits.
3946   // In this case we can't know if there will be an overflow, since by
3947   // changing the sign bits any two values can be made to overflow.
3948   return false;
3949 }
3950 
3951 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3952                                                   const Value *RHS,
3953                                                   const AddOperator *Add,
3954                                                   const DataLayout &DL,
3955                                                   AssumptionCache *AC,
3956                                                   const Instruction *CxtI,
3957                                                   const DominatorTree *DT) {
3958   if (Add && Add->hasNoSignedWrap()) {
3959     return OverflowResult::NeverOverflows;
3960   }
3961 
3962   // If LHS and RHS each have at least two sign bits, the addition will look
3963   // like
3964   //
3965   // XX..... +
3966   // YY.....
3967   //
3968   // If the carry into the most significant position is 0, X and Y can't both
3969   // be 1 and therefore the carry out of the addition is also 0.
3970   //
3971   // If the carry into the most significant position is 1, X and Y can't both
3972   // be 0 and therefore the carry out of the addition is also 1.
3973   //
3974   // Since the carry into the most significant position is always equal to
3975   // the carry out of the addition, there is no signed overflow.
3976   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3977       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3978     return OverflowResult::NeverOverflows;
3979 
3980   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3981   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3982 
3983   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3984     return OverflowResult::NeverOverflows;
3985 
3986   // The remaining code needs Add to be available. Early returns if not so.
3987   if (!Add)
3988     return OverflowResult::MayOverflow;
3989 
3990   // If the sign of Add is the same as at least one of the operands, this add
3991   // CANNOT overflow. This is particularly useful when the sum is
3992   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3993   // operands.
3994   bool LHSOrRHSKnownNonNegative =
3995       (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3996   bool LHSOrRHSKnownNegative =
3997       (LHSKnown.isNegative() || RHSKnown.isNegative());
3998   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3999     KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
4000     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4001         (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
4002       return OverflowResult::NeverOverflows;
4003     }
4004   }
4005 
4006   return OverflowResult::MayOverflow;
4007 }
4008 
4009 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4010                                                    const Value *RHS,
4011                                                    const DataLayout &DL,
4012                                                    AssumptionCache *AC,
4013                                                    const Instruction *CxtI,
4014                                                    const DominatorTree *DT) {
4015   // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
4016   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
4017   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
4018   if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
4019     return OverflowResult::NeverOverflows;
4020 
4021   return OverflowResult::MayOverflow;
4022 }
4023 
4024 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4025                                                  const Value *RHS,
4026                                                  const DataLayout &DL,
4027                                                  AssumptionCache *AC,
4028                                                  const Instruction *CxtI,
4029                                                  const DominatorTree *DT) {
4030   // If LHS and RHS each have at least two sign bits, the subtraction
4031   // cannot overflow.
4032   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4033       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4034     return OverflowResult::NeverOverflows;
4035 
4036   KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT);
4037 
4038   KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT);
4039 
4040   // Subtraction of two 2's complement numbers having identical signs will
4041   // never overflow.
4042   if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
4043       (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
4044     return OverflowResult::NeverOverflows;
4045 
4046   // TODO: implement logic similar to checkRippleForAdd
4047   return OverflowResult::MayOverflow;
4048 }
4049 
4050 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
4051                                      const DominatorTree &DT) {
4052 #ifndef NDEBUG
4053   auto IID = II->getIntrinsicID();
4054   assert((IID == Intrinsic::sadd_with_overflow ||
4055           IID == Intrinsic::uadd_with_overflow ||
4056           IID == Intrinsic::ssub_with_overflow ||
4057           IID == Intrinsic::usub_with_overflow ||
4058           IID == Intrinsic::smul_with_overflow ||
4059           IID == Intrinsic::umul_with_overflow) &&
4060          "Not an overflow intrinsic!");
4061 #endif
4062 
4063   SmallVector<const BranchInst *, 2> GuardingBranches;
4064   SmallVector<const ExtractValueInst *, 2> Results;
4065 
4066   for (const User *U : II->users()) {
4067     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4068       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4069 
4070       if (EVI->getIndices()[0] == 0)
4071         Results.push_back(EVI);
4072       else {
4073         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4074 
4075         for (const auto *U : EVI->users())
4076           if (const auto *B = dyn_cast<BranchInst>(U)) {
4077             assert(B->isConditional() && "How else is it using an i1?");
4078             GuardingBranches.push_back(B);
4079           }
4080       }
4081     } else {
4082       // We are using the aggregate directly in a way we don't want to analyze
4083       // here (storing it to a global, say).
4084       return false;
4085     }
4086   }
4087 
4088   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4089     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4090     if (!NoWrapEdge.isSingleEdge())
4091       return false;
4092 
4093     // Check if all users of the add are provably no-wrap.
4094     for (const auto *Result : Results) {
4095       // If the extractvalue itself is not executed on overflow, the we don't
4096       // need to check each use separately, since domination is transitive.
4097       if (DT.dominates(NoWrapEdge, Result->getParent()))
4098         continue;
4099 
4100       for (auto &RU : Result->uses())
4101         if (!DT.dominates(NoWrapEdge, RU))
4102           return false;
4103     }
4104 
4105     return true;
4106   };
4107 
4108   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4109 }
4110 
4111 
4112 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4113                                                  const DataLayout &DL,
4114                                                  AssumptionCache *AC,
4115                                                  const Instruction *CxtI,
4116                                                  const DominatorTree *DT) {
4117   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4118                                        Add, DL, AC, CxtI, DT);
4119 }
4120 
4121 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4122                                                  const Value *RHS,
4123                                                  const DataLayout &DL,
4124                                                  AssumptionCache *AC,
4125                                                  const Instruction *CxtI,
4126                                                  const DominatorTree *DT) {
4127   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4128 }
4129 
4130 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4131   // A memory operation returns normally if it isn't volatile. A volatile
4132   // operation is allowed to trap.
4133   //
4134   // An atomic operation isn't guaranteed to return in a reasonable amount of
4135   // time because it's possible for another thread to interfere with it for an
4136   // arbitrary length of time, but programs aren't allowed to rely on that.
4137   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4138     return !LI->isVolatile();
4139   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4140     return !SI->isVolatile();
4141   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4142     return !CXI->isVolatile();
4143   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4144     return !RMWI->isVolatile();
4145   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4146     return !MII->isVolatile();
4147 
4148   // If there is no successor, then execution can't transfer to it.
4149   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4150     return !CRI->unwindsToCaller();
4151   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4152     return !CatchSwitch->unwindsToCaller();
4153   if (isa<ResumeInst>(I))
4154     return false;
4155   if (isa<ReturnInst>(I))
4156     return false;
4157   if (isa<UnreachableInst>(I))
4158     return false;
4159 
4160   // Calls can throw, or contain an infinite loop, or kill the process.
4161   if (auto CS = ImmutableCallSite(I)) {
4162     // Call sites that throw have implicit non-local control flow.
4163     if (!CS.doesNotThrow())
4164       return false;
4165 
4166     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4167     // etc. and thus not return.  However, LLVM already assumes that
4168     //
4169     //  - Thread exiting actions are modeled as writes to memory invisible to
4170     //    the program.
4171     //
4172     //  - Loops that don't have side effects (side effects are volatile/atomic
4173     //    stores and IO) always terminate (see http://llvm.org/PR965).
4174     //    Furthermore IO itself is also modeled as writes to memory invisible to
4175     //    the program.
4176     //
4177     // We rely on those assumptions here, and use the memory effects of the call
4178     // target as a proxy for checking that it always returns.
4179 
4180     // FIXME: This isn't aggressive enough; a call which only writes to a global
4181     // is guaranteed to return.
4182     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4183            match(I, m_Intrinsic<Intrinsic::assume>()) ||
4184            match(I, m_Intrinsic<Intrinsic::sideeffect>());
4185   }
4186 
4187   // Other instructions return normally.
4188   return true;
4189 }
4190 
4191 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4192   // TODO: This is slightly consdervative for invoke instruction since exiting
4193   // via an exception *is* normal control for them.
4194   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4195     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4196       return false;
4197   return true;
4198 }
4199 
4200 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4201                                                   const Loop *L) {
4202   // The loop header is guaranteed to be executed for every iteration.
4203   //
4204   // FIXME: Relax this constraint to cover all basic blocks that are
4205   // guaranteed to be executed at every iteration.
4206   if (I->getParent() != L->getHeader()) return false;
4207 
4208   for (const Instruction &LI : *L->getHeader()) {
4209     if (&LI == I) return true;
4210     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4211   }
4212   llvm_unreachable("Instruction not contained in its own parent basic block.");
4213 }
4214 
4215 bool llvm::propagatesFullPoison(const Instruction *I) {
4216   switch (I->getOpcode()) {
4217   case Instruction::Add:
4218   case Instruction::Sub:
4219   case Instruction::Xor:
4220   case Instruction::Trunc:
4221   case Instruction::BitCast:
4222   case Instruction::AddrSpaceCast:
4223   case Instruction::Mul:
4224   case Instruction::Shl:
4225   case Instruction::GetElementPtr:
4226     // These operations all propagate poison unconditionally. Note that poison
4227     // is not any particular value, so xor or subtraction of poison with
4228     // itself still yields poison, not zero.
4229     return true;
4230 
4231   case Instruction::AShr:
4232   case Instruction::SExt:
4233     // For these operations, one bit of the input is replicated across
4234     // multiple output bits. A replicated poison bit is still poison.
4235     return true;
4236 
4237   case Instruction::ICmp:
4238     // Comparing poison with any value yields poison.  This is why, for
4239     // instance, x s< (x +nsw 1) can be folded to true.
4240     return true;
4241 
4242   default:
4243     return false;
4244   }
4245 }
4246 
4247 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4248   switch (I->getOpcode()) {
4249     case Instruction::Store:
4250       return cast<StoreInst>(I)->getPointerOperand();
4251 
4252     case Instruction::Load:
4253       return cast<LoadInst>(I)->getPointerOperand();
4254 
4255     case Instruction::AtomicCmpXchg:
4256       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4257 
4258     case Instruction::AtomicRMW:
4259       return cast<AtomicRMWInst>(I)->getPointerOperand();
4260 
4261     case Instruction::UDiv:
4262     case Instruction::SDiv:
4263     case Instruction::URem:
4264     case Instruction::SRem:
4265       return I->getOperand(1);
4266 
4267     default:
4268       return nullptr;
4269   }
4270 }
4271 
4272 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4273   // We currently only look for uses of poison values within the same basic
4274   // block, as that makes it easier to guarantee that the uses will be
4275   // executed given that PoisonI is executed.
4276   //
4277   // FIXME: Expand this to consider uses beyond the same basic block. To do
4278   // this, look out for the distinction between post-dominance and strong
4279   // post-dominance.
4280   const BasicBlock *BB = PoisonI->getParent();
4281 
4282   // Set of instructions that we have proved will yield poison if PoisonI
4283   // does.
4284   SmallSet<const Value *, 16> YieldsPoison;
4285   SmallSet<const BasicBlock *, 4> Visited;
4286   YieldsPoison.insert(PoisonI);
4287   Visited.insert(PoisonI->getParent());
4288 
4289   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4290 
4291   unsigned Iter = 0;
4292   while (Iter++ < MaxDepth) {
4293     for (auto &I : make_range(Begin, End)) {
4294       if (&I != PoisonI) {
4295         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4296         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4297           return true;
4298         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4299           return false;
4300       }
4301 
4302       // Mark poison that propagates from I through uses of I.
4303       if (YieldsPoison.count(&I)) {
4304         for (const User *User : I.users()) {
4305           const Instruction *UserI = cast<Instruction>(User);
4306           if (propagatesFullPoison(UserI))
4307             YieldsPoison.insert(User);
4308         }
4309       }
4310     }
4311 
4312     if (auto *NextBB = BB->getSingleSuccessor()) {
4313       if (Visited.insert(NextBB).second) {
4314         BB = NextBB;
4315         Begin = BB->getFirstNonPHI()->getIterator();
4316         End = BB->end();
4317         continue;
4318       }
4319     }
4320 
4321     break;
4322   }
4323   return false;
4324 }
4325 
4326 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4327   if (FMF.noNaNs())
4328     return true;
4329 
4330   if (auto *C = dyn_cast<ConstantFP>(V))
4331     return !C->isNaN();
4332   return false;
4333 }
4334 
4335 static bool isKnownNonZero(const Value *V) {
4336   if (auto *C = dyn_cast<ConstantFP>(V))
4337     return !C->isZero();
4338   return false;
4339 }
4340 
4341 /// Match clamp pattern for float types without care about NaNs or signed zeros.
4342 /// Given non-min/max outer cmp/select from the clamp pattern this
4343 /// function recognizes if it can be substitued by a "canonical" min/max
4344 /// pattern.
4345 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4346                                                Value *CmpLHS, Value *CmpRHS,
4347                                                Value *TrueVal, Value *FalseVal,
4348                                                Value *&LHS, Value *&RHS) {
4349   // Try to match
4350   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4351   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4352   // and return description of the outer Max/Min.
4353 
4354   // First, check if select has inverse order:
4355   if (CmpRHS == FalseVal) {
4356     std::swap(TrueVal, FalseVal);
4357     Pred = CmpInst::getInversePredicate(Pred);
4358   }
4359 
4360   // Assume success now. If there's no match, callers should not use these anyway.
4361   LHS = TrueVal;
4362   RHS = FalseVal;
4363 
4364   const APFloat *FC1;
4365   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4366     return {SPF_UNKNOWN, SPNB_NA, false};
4367 
4368   const APFloat *FC2;
4369   switch (Pred) {
4370   case CmpInst::FCMP_OLT:
4371   case CmpInst::FCMP_OLE:
4372   case CmpInst::FCMP_ULT:
4373   case CmpInst::FCMP_ULE:
4374     if (match(FalseVal,
4375               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4376                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4377         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4378       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4379     break;
4380   case CmpInst::FCMP_OGT:
4381   case CmpInst::FCMP_OGE:
4382   case CmpInst::FCMP_UGT:
4383   case CmpInst::FCMP_UGE:
4384     if (match(FalseVal,
4385               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4386                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4387         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4388       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4389     break;
4390   default:
4391     break;
4392   }
4393 
4394   return {SPF_UNKNOWN, SPNB_NA, false};
4395 }
4396 
4397 /// Recognize variations of:
4398 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4399 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4400                                       Value *CmpLHS, Value *CmpRHS,
4401                                       Value *TrueVal, Value *FalseVal) {
4402   // Swap the select operands and predicate to match the patterns below.
4403   if (CmpRHS != TrueVal) {
4404     Pred = ICmpInst::getSwappedPredicate(Pred);
4405     std::swap(TrueVal, FalseVal);
4406   }
4407   const APInt *C1;
4408   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4409     const APInt *C2;
4410     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4411     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4412         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4413       return {SPF_SMAX, SPNB_NA, false};
4414 
4415     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4416     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4417         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4418       return {SPF_SMIN, SPNB_NA, false};
4419 
4420     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4421     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4422         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4423       return {SPF_UMAX, SPNB_NA, false};
4424 
4425     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4426     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4427         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4428       return {SPF_UMIN, SPNB_NA, false};
4429   }
4430   return {SPF_UNKNOWN, SPNB_NA, false};
4431 }
4432 
4433 /// Recognize variations of:
4434 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4435 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4436                                                Value *CmpLHS, Value *CmpRHS,
4437                                                Value *TVal, Value *FVal,
4438                                                unsigned Depth) {
4439   // TODO: Allow FP min/max with nnan/nsz.
4440   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4441 
4442   Value *A, *B;
4443   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4444   if (!SelectPatternResult::isMinOrMax(L.Flavor))
4445     return {SPF_UNKNOWN, SPNB_NA, false};
4446 
4447   Value *C, *D;
4448   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4449   if (L.Flavor != R.Flavor)
4450     return {SPF_UNKNOWN, SPNB_NA, false};
4451 
4452   // We have something like: x Pred y ? min(a, b) : min(c, d).
4453   // Try to match the compare to the min/max operations of the select operands.
4454   // First, make sure we have the right compare predicate.
4455   switch (L.Flavor) {
4456   case SPF_SMIN:
4457     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4458       Pred = ICmpInst::getSwappedPredicate(Pred);
4459       std::swap(CmpLHS, CmpRHS);
4460     }
4461     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4462       break;
4463     return {SPF_UNKNOWN, SPNB_NA, false};
4464   case SPF_SMAX:
4465     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4466       Pred = ICmpInst::getSwappedPredicate(Pred);
4467       std::swap(CmpLHS, CmpRHS);
4468     }
4469     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4470       break;
4471     return {SPF_UNKNOWN, SPNB_NA, false};
4472   case SPF_UMIN:
4473     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4474       Pred = ICmpInst::getSwappedPredicate(Pred);
4475       std::swap(CmpLHS, CmpRHS);
4476     }
4477     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4478       break;
4479     return {SPF_UNKNOWN, SPNB_NA, false};
4480   case SPF_UMAX:
4481     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4482       Pred = ICmpInst::getSwappedPredicate(Pred);
4483       std::swap(CmpLHS, CmpRHS);
4484     }
4485     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4486       break;
4487     return {SPF_UNKNOWN, SPNB_NA, false};
4488   default:
4489     return {SPF_UNKNOWN, SPNB_NA, false};
4490   }
4491 
4492   // If there is a common operand in the already matched min/max and the other
4493   // min/max operands match the compare operands (either directly or inverted),
4494   // then this is min/max of the same flavor.
4495 
4496   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4497   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4498   if (D == B) {
4499     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4500                                          match(A, m_Not(m_Specific(CmpRHS)))))
4501       return {L.Flavor, SPNB_NA, false};
4502   }
4503   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4504   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4505   if (C == B) {
4506     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4507                                          match(A, m_Not(m_Specific(CmpRHS)))))
4508       return {L.Flavor, SPNB_NA, false};
4509   }
4510   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4511   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4512   if (D == A) {
4513     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4514                                          match(B, m_Not(m_Specific(CmpRHS)))))
4515       return {L.Flavor, SPNB_NA, false};
4516   }
4517   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4518   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4519   if (C == A) {
4520     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4521                                          match(B, m_Not(m_Specific(CmpRHS)))))
4522       return {L.Flavor, SPNB_NA, false};
4523   }
4524 
4525   return {SPF_UNKNOWN, SPNB_NA, false};
4526 }
4527 
4528 /// Match non-obvious integer minimum and maximum sequences.
4529 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4530                                        Value *CmpLHS, Value *CmpRHS,
4531                                        Value *TrueVal, Value *FalseVal,
4532                                        Value *&LHS, Value *&RHS,
4533                                        unsigned Depth) {
4534   // Assume success. If there's no match, callers should not use these anyway.
4535   LHS = TrueVal;
4536   RHS = FalseVal;
4537 
4538   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4539   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4540     return SPR;
4541 
4542   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4543   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4544     return SPR;
4545 
4546   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4547     return {SPF_UNKNOWN, SPNB_NA, false};
4548 
4549   // Z = X -nsw Y
4550   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4551   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4552   if (match(TrueVal, m_Zero()) &&
4553       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4554     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4555 
4556   // Z = X -nsw Y
4557   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4558   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4559   if (match(FalseVal, m_Zero()) &&
4560       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4561     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4562 
4563   const APInt *C1;
4564   if (!match(CmpRHS, m_APInt(C1)))
4565     return {SPF_UNKNOWN, SPNB_NA, false};
4566 
4567   // An unsigned min/max can be written with a signed compare.
4568   const APInt *C2;
4569   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4570       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4571     // Is the sign bit set?
4572     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4573     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4574     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4575         C2->isMaxSignedValue())
4576       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4577 
4578     // Is the sign bit clear?
4579     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4580     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4581     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4582         C2->isMinSignedValue())
4583       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4584   }
4585 
4586   // Look through 'not' ops to find disguised signed min/max.
4587   // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4588   // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4589   if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4590       match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4591     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4592 
4593   // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4594   // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4595   if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4596       match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4597     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4598 
4599   return {SPF_UNKNOWN, SPNB_NA, false};
4600 }
4601 
4602 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
4603   assert(X && Y && "Invalid operand");
4604 
4605   // X = sub (0, Y) || X = sub nsw (0, Y)
4606   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
4607       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
4608     return true;
4609 
4610   // Y = sub (0, X) || Y = sub nsw (0, X)
4611   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
4612       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
4613     return true;
4614 
4615   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
4616   Value *A, *B;
4617   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
4618                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
4619          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
4620                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
4621 }
4622 
4623 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4624                                               FastMathFlags FMF,
4625                                               Value *CmpLHS, Value *CmpRHS,
4626                                               Value *TrueVal, Value *FalseVal,
4627                                               Value *&LHS, Value *&RHS,
4628                                               unsigned Depth) {
4629   LHS = CmpLHS;
4630   RHS = CmpRHS;
4631 
4632   // Signed zero may return inconsistent results between implementations.
4633   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4634   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4635   // Therefore, we behave conservatively and only proceed if at least one of the
4636   // operands is known to not be zero or if we don't care about signed zero.
4637   switch (Pred) {
4638   default: break;
4639   // FIXME: Include OGT/OLT/UGT/ULT.
4640   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4641   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4642     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4643         !isKnownNonZero(CmpRHS))
4644       return {SPF_UNKNOWN, SPNB_NA, false};
4645   }
4646 
4647   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4648   bool Ordered = false;
4649 
4650   // When given one NaN and one non-NaN input:
4651   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4652   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4653   //     ordered comparison fails), which could be NaN or non-NaN.
4654   // so here we discover exactly what NaN behavior is required/accepted.
4655   if (CmpInst::isFPPredicate(Pred)) {
4656     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4657     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4658 
4659     if (LHSSafe && RHSSafe) {
4660       // Both operands are known non-NaN.
4661       NaNBehavior = SPNB_RETURNS_ANY;
4662     } else if (CmpInst::isOrdered(Pred)) {
4663       // An ordered comparison will return false when given a NaN, so it
4664       // returns the RHS.
4665       Ordered = true;
4666       if (LHSSafe)
4667         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4668         NaNBehavior = SPNB_RETURNS_NAN;
4669       else if (RHSSafe)
4670         NaNBehavior = SPNB_RETURNS_OTHER;
4671       else
4672         // Completely unsafe.
4673         return {SPF_UNKNOWN, SPNB_NA, false};
4674     } else {
4675       Ordered = false;
4676       // An unordered comparison will return true when given a NaN, so it
4677       // returns the LHS.
4678       if (LHSSafe)
4679         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4680         NaNBehavior = SPNB_RETURNS_OTHER;
4681       else if (RHSSafe)
4682         NaNBehavior = SPNB_RETURNS_NAN;
4683       else
4684         // Completely unsafe.
4685         return {SPF_UNKNOWN, SPNB_NA, false};
4686     }
4687   }
4688 
4689   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4690     std::swap(CmpLHS, CmpRHS);
4691     Pred = CmpInst::getSwappedPredicate(Pred);
4692     if (NaNBehavior == SPNB_RETURNS_NAN)
4693       NaNBehavior = SPNB_RETURNS_OTHER;
4694     else if (NaNBehavior == SPNB_RETURNS_OTHER)
4695       NaNBehavior = SPNB_RETURNS_NAN;
4696     Ordered = !Ordered;
4697   }
4698 
4699   // ([if]cmp X, Y) ? X : Y
4700   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4701     switch (Pred) {
4702     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4703     case ICmpInst::ICMP_UGT:
4704     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4705     case ICmpInst::ICMP_SGT:
4706     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4707     case ICmpInst::ICMP_ULT:
4708     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4709     case ICmpInst::ICMP_SLT:
4710     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4711     case FCmpInst::FCMP_UGT:
4712     case FCmpInst::FCMP_UGE:
4713     case FCmpInst::FCMP_OGT:
4714     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4715     case FCmpInst::FCMP_ULT:
4716     case FCmpInst::FCMP_ULE:
4717     case FCmpInst::FCMP_OLT:
4718     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4719     }
4720   }
4721 
4722   if (isKnownNegation(TrueVal, FalseVal)) {
4723     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
4724     // match against either LHS or sext(LHS).
4725     auto MaybeSExtCmpLHS =
4726         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
4727     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
4728     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
4729     if (match(TrueVal, MaybeSExtCmpLHS)) {
4730       // Set the return values. If the compare uses the negated value (-X >s 0),
4731       // swap the return values because the negated value is always 'RHS'.
4732       LHS = TrueVal;
4733       RHS = FalseVal;
4734       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
4735         std::swap(LHS, RHS);
4736 
4737       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
4738       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
4739       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4740         return {SPF_ABS, SPNB_NA, false};
4741 
4742       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
4743       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
4744       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4745         return {SPF_NABS, SPNB_NA, false};
4746     }
4747     else if (match(FalseVal, MaybeSExtCmpLHS)) {
4748       // Set the return values. If the compare uses the negated value (-X >s 0),
4749       // swap the return values because the negated value is always 'RHS'.
4750       LHS = FalseVal;
4751       RHS = TrueVal;
4752       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
4753         std::swap(LHS, RHS);
4754 
4755       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
4756       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
4757       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4758         return {SPF_NABS, SPNB_NA, false};
4759 
4760       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
4761       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
4762       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4763         return {SPF_ABS, SPNB_NA, false};
4764     }
4765   }
4766 
4767   if (CmpInst::isIntPredicate(Pred))
4768     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4769 
4770   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4771   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
4772   // semantics than minNum. Be conservative in such case.
4773   if (NaNBehavior != SPNB_RETURNS_ANY ||
4774       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4775        !isKnownNonZero(CmpRHS)))
4776     return {SPF_UNKNOWN, SPNB_NA, false};
4777 
4778   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4779 }
4780 
4781 /// Helps to match a select pattern in case of a type mismatch.
4782 ///
4783 /// The function processes the case when type of true and false values of a
4784 /// select instruction differs from type of the cmp instruction operands because
4785 /// of a cast instruction. The function checks if it is legal to move the cast
4786 /// operation after "select". If yes, it returns the new second value of
4787 /// "select" (with the assumption that cast is moved):
4788 /// 1. As operand of cast instruction when both values of "select" are same cast
4789 /// instructions.
4790 /// 2. As restored constant (by applying reverse cast operation) when the first
4791 /// value of the "select" is a cast operation and the second value is a
4792 /// constant.
4793 /// NOTE: We return only the new second value because the first value could be
4794 /// accessed as operand of cast instruction.
4795 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4796                               Instruction::CastOps *CastOp) {
4797   auto *Cast1 = dyn_cast<CastInst>(V1);
4798   if (!Cast1)
4799     return nullptr;
4800 
4801   *CastOp = Cast1->getOpcode();
4802   Type *SrcTy = Cast1->getSrcTy();
4803   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4804     // If V1 and V2 are both the same cast from the same type, look through V1.
4805     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4806       return Cast2->getOperand(0);
4807     return nullptr;
4808   }
4809 
4810   auto *C = dyn_cast<Constant>(V2);
4811   if (!C)
4812     return nullptr;
4813 
4814   Constant *CastedTo = nullptr;
4815   switch (*CastOp) {
4816   case Instruction::ZExt:
4817     if (CmpI->isUnsigned())
4818       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4819     break;
4820   case Instruction::SExt:
4821     if (CmpI->isSigned())
4822       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4823     break;
4824   case Instruction::Trunc:
4825     Constant *CmpConst;
4826     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4827         CmpConst->getType() == SrcTy) {
4828       // Here we have the following case:
4829       //
4830       //   %cond = cmp iN %x, CmpConst
4831       //   %tr = trunc iN %x to iK
4832       //   %narrowsel = select i1 %cond, iK %t, iK C
4833       //
4834       // We can always move trunc after select operation:
4835       //
4836       //   %cond = cmp iN %x, CmpConst
4837       //   %widesel = select i1 %cond, iN %x, iN CmpConst
4838       //   %tr = trunc iN %widesel to iK
4839       //
4840       // Note that C could be extended in any way because we don't care about
4841       // upper bits after truncation. It can't be abs pattern, because it would
4842       // look like:
4843       //
4844       //   select i1 %cond, x, -x.
4845       //
4846       // So only min/max pattern could be matched. Such match requires widened C
4847       // == CmpConst. That is why set widened C = CmpConst, condition trunc
4848       // CmpConst == C is checked below.
4849       CastedTo = CmpConst;
4850     } else {
4851       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4852     }
4853     break;
4854   case Instruction::FPTrunc:
4855     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4856     break;
4857   case Instruction::FPExt:
4858     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4859     break;
4860   case Instruction::FPToUI:
4861     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4862     break;
4863   case Instruction::FPToSI:
4864     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4865     break;
4866   case Instruction::UIToFP:
4867     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4868     break;
4869   case Instruction::SIToFP:
4870     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4871     break;
4872   default:
4873     break;
4874   }
4875 
4876   if (!CastedTo)
4877     return nullptr;
4878 
4879   // Make sure the cast doesn't lose any information.
4880   Constant *CastedBack =
4881       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4882   if (CastedBack != C)
4883     return nullptr;
4884 
4885   return CastedTo;
4886 }
4887 
4888 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4889                                              Instruction::CastOps *CastOp,
4890                                              unsigned Depth) {
4891   if (Depth >= MaxDepth)
4892     return {SPF_UNKNOWN, SPNB_NA, false};
4893 
4894   SelectInst *SI = dyn_cast<SelectInst>(V);
4895   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4896 
4897   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4898   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4899 
4900   CmpInst::Predicate Pred = CmpI->getPredicate();
4901   Value *CmpLHS = CmpI->getOperand(0);
4902   Value *CmpRHS = CmpI->getOperand(1);
4903   Value *TrueVal = SI->getTrueValue();
4904   Value *FalseVal = SI->getFalseValue();
4905   FastMathFlags FMF;
4906   if (isa<FPMathOperator>(CmpI))
4907     FMF = CmpI->getFastMathFlags();
4908 
4909   // Bail out early.
4910   if (CmpI->isEquality())
4911     return {SPF_UNKNOWN, SPNB_NA, false};
4912 
4913   // Deal with type mismatches.
4914   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4915     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
4916       // If this is a potential fmin/fmax with a cast to integer, then ignore
4917       // -0.0 because there is no corresponding integer value.
4918       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4919         FMF.setNoSignedZeros();
4920       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4921                                   cast<CastInst>(TrueVal)->getOperand(0), C,
4922                                   LHS, RHS, Depth);
4923     }
4924     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
4925       // If this is a potential fmin/fmax with a cast to integer, then ignore
4926       // -0.0 because there is no corresponding integer value.
4927       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4928         FMF.setNoSignedZeros();
4929       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4930                                   C, cast<CastInst>(FalseVal)->getOperand(0),
4931                                   LHS, RHS, Depth);
4932     }
4933   }
4934   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4935                               LHS, RHS, Depth);
4936 }
4937 
4938 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
4939   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
4940   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
4941   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
4942   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
4943   if (SPF == SPF_FMINNUM)
4944     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
4945   if (SPF == SPF_FMAXNUM)
4946     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
4947   llvm_unreachable("unhandled!");
4948 }
4949 
4950 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
4951   if (SPF == SPF_SMIN) return SPF_SMAX;
4952   if (SPF == SPF_UMIN) return SPF_UMAX;
4953   if (SPF == SPF_SMAX) return SPF_SMIN;
4954   if (SPF == SPF_UMAX) return SPF_UMIN;
4955   llvm_unreachable("unhandled!");
4956 }
4957 
4958 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
4959   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
4960 }
4961 
4962 /// Return true if "icmp Pred LHS RHS" is always true.
4963 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
4964                             const Value *RHS, const DataLayout &DL,
4965                             unsigned Depth) {
4966   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4967   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4968     return true;
4969 
4970   switch (Pred) {
4971   default:
4972     return false;
4973 
4974   case CmpInst::ICMP_SLE: {
4975     const APInt *C;
4976 
4977     // LHS s<= LHS +_{nsw} C   if C >= 0
4978     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4979       return !C->isNegative();
4980     return false;
4981   }
4982 
4983   case CmpInst::ICMP_ULE: {
4984     const APInt *C;
4985 
4986     // LHS u<= LHS +_{nuw} C   for any C
4987     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4988       return true;
4989 
4990     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4991     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4992                                        const Value *&X,
4993                                        const APInt *&CA, const APInt *&CB) {
4994       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4995           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4996         return true;
4997 
4998       // If X & C == 0 then (X | C) == X +_{nuw} C
4999       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
5000           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
5001         KnownBits Known(CA->getBitWidth());
5002         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
5003                          /*CxtI*/ nullptr, /*DT*/ nullptr);
5004         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
5005           return true;
5006       }
5007 
5008       return false;
5009     };
5010 
5011     const Value *X;
5012     const APInt *CLHS, *CRHS;
5013     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
5014       return CLHS->ule(*CRHS);
5015 
5016     return false;
5017   }
5018   }
5019 }
5020 
5021 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
5022 /// ALHS ARHS" is true.  Otherwise, return None.
5023 static Optional<bool>
5024 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
5025                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
5026                       const DataLayout &DL, unsigned Depth) {
5027   switch (Pred) {
5028   default:
5029     return None;
5030 
5031   case CmpInst::ICMP_SLT:
5032   case CmpInst::ICMP_SLE:
5033     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
5034         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
5035       return true;
5036     return None;
5037 
5038   case CmpInst::ICMP_ULT:
5039   case CmpInst::ICMP_ULE:
5040     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
5041         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
5042       return true;
5043     return None;
5044   }
5045 }
5046 
5047 /// Return true if the operands of the two compares match.  IsSwappedOps is true
5048 /// when the operands match, but are swapped.
5049 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
5050                           const Value *BLHS, const Value *BRHS,
5051                           bool &IsSwappedOps) {
5052 
5053   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
5054   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
5055   return IsMatchingOps || IsSwappedOps;
5056 }
5057 
5058 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
5059 /// true.  Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
5060 /// BRHS" is false.  Otherwise, return None if we can't infer anything.
5061 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
5062                                                     const Value *ALHS,
5063                                                     const Value *ARHS,
5064                                                     CmpInst::Predicate BPred,
5065                                                     const Value *BLHS,
5066                                                     const Value *BRHS,
5067                                                     bool IsSwappedOps) {
5068   // Canonicalize the operands so they're matching.
5069   if (IsSwappedOps) {
5070     std::swap(BLHS, BRHS);
5071     BPred = ICmpInst::getSwappedPredicate(BPred);
5072   }
5073   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
5074     return true;
5075   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
5076     return false;
5077 
5078   return None;
5079 }
5080 
5081 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
5082 /// true.  Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
5083 /// C2" is false.  Otherwise, return None if we can't infer anything.
5084 static Optional<bool>
5085 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
5086                                  const ConstantInt *C1,
5087                                  CmpInst::Predicate BPred,
5088                                  const Value *BLHS, const ConstantInt *C2) {
5089   assert(ALHS == BLHS && "LHS operands must match.");
5090   ConstantRange DomCR =
5091       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
5092   ConstantRange CR =
5093       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
5094   ConstantRange Intersection = DomCR.intersectWith(CR);
5095   ConstantRange Difference = DomCR.difference(CR);
5096   if (Intersection.isEmptySet())
5097     return false;
5098   if (Difference.isEmptySet())
5099     return true;
5100   return None;
5101 }
5102 
5103 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
5104 /// false.  Otherwise, return None if we can't infer anything.
5105 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
5106                                          const ICmpInst *RHS,
5107                                          const DataLayout &DL, bool LHSIsTrue,
5108                                          unsigned Depth) {
5109   Value *ALHS = LHS->getOperand(0);
5110   Value *ARHS = LHS->getOperand(1);
5111   // The rest of the logic assumes the LHS condition is true.  If that's not the
5112   // case, invert the predicate to make it so.
5113   ICmpInst::Predicate APred =
5114       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
5115 
5116   Value *BLHS = RHS->getOperand(0);
5117   Value *BRHS = RHS->getOperand(1);
5118   ICmpInst::Predicate BPred = RHS->getPredicate();
5119 
5120   // Can we infer anything when the two compares have matching operands?
5121   bool IsSwappedOps;
5122   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
5123     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
5124             APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
5125       return Implication;
5126     // No amount of additional analysis will infer the second condition, so
5127     // early exit.
5128     return None;
5129   }
5130 
5131   // Can we infer anything when the LHS operands match and the RHS operands are
5132   // constants (not necessarily matching)?
5133   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
5134     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
5135             APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
5136             cast<ConstantInt>(BRHS)))
5137       return Implication;
5138     // No amount of additional analysis will infer the second condition, so
5139     // early exit.
5140     return None;
5141   }
5142 
5143   if (APred == BPred)
5144     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
5145   return None;
5146 }
5147 
5148 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
5149 /// false.  Otherwise, return None if we can't infer anything.  We expect the
5150 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
5151 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
5152                                          const ICmpInst *RHS,
5153                                          const DataLayout &DL, bool LHSIsTrue,
5154                                          unsigned Depth) {
5155   // The LHS must be an 'or' or an 'and' instruction.
5156   assert((LHS->getOpcode() == Instruction::And ||
5157           LHS->getOpcode() == Instruction::Or) &&
5158          "Expected LHS to be 'and' or 'or'.");
5159 
5160   assert(Depth <= MaxDepth && "Hit recursion limit");
5161 
5162   // If the result of an 'or' is false, then we know both legs of the 'or' are
5163   // false.  Similarly, if the result of an 'and' is true, then we know both
5164   // legs of the 'and' are true.
5165   Value *ALHS, *ARHS;
5166   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
5167       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
5168     // FIXME: Make this non-recursion.
5169     if (Optional<bool> Implication =
5170             isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
5171       return Implication;
5172     if (Optional<bool> Implication =
5173             isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
5174       return Implication;
5175     return None;
5176   }
5177   return None;
5178 }
5179 
5180 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
5181                                         const DataLayout &DL, bool LHSIsTrue,
5182                                         unsigned Depth) {
5183   // Bail out when we hit the limit.
5184   if (Depth == MaxDepth)
5185     return None;
5186 
5187   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
5188   // example.
5189   if (LHS->getType() != RHS->getType())
5190     return None;
5191 
5192   Type *OpTy = LHS->getType();
5193   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
5194 
5195   // LHS ==> RHS by definition
5196   if (LHS == RHS)
5197     return LHSIsTrue;
5198 
5199   // FIXME: Extending the code below to handle vectors.
5200   if (OpTy->isVectorTy())
5201     return None;
5202 
5203   assert(OpTy->isIntegerTy(1) && "implied by above");
5204 
5205   // Both LHS and RHS are icmps.
5206   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
5207   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
5208   if (LHSCmp && RHSCmp)
5209     return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
5210 
5211   // The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to be
5212   // an icmp. FIXME: Add support for and/or on the RHS.
5213   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
5214   if (LHSBO && RHSCmp) {
5215     if ((LHSBO->getOpcode() == Instruction::And ||
5216          LHSBO->getOpcode() == Instruction::Or))
5217       return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
5218   }
5219   return None;
5220 }
5221