1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/GuardUtils.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/Loads.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/IntrinsicsAArch64.h"
55 #include "llvm/IR/IntrinsicsX86.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/Operator.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/User.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Compiler.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/KnownBits.h"
69 #include "llvm/Support/MathExtras.h"
70 #include <algorithm>
71 #include <array>
72 #include <cassert>
73 #include <cstdint>
74 #include <iterator>
75 #include <utility>
76 
77 using namespace llvm;
78 using namespace llvm::PatternMatch;
79 
80 // Controls the number of uses of the value searched for possible
81 // dominating comparisons.
82 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83                                               cl::Hidden, cl::init(20));
84 
85 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
86 /// returns the element type's bitwidth.
87 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
88   if (unsigned BitWidth = Ty->getScalarSizeInBits())
89     return BitWidth;
90 
91   return DL.getPointerTypeSizeInBits(Ty);
92 }
93 
94 namespace {
95 
96 // Simplifying using an assume can only be done in a particular control-flow
97 // context (the context instruction provides that context). If an assume and
98 // the context instruction are not in the same block then the DT helps in
99 // figuring out if we can use it.
100 struct Query {
101   const DataLayout &DL;
102   AssumptionCache *AC;
103   const Instruction *CxtI;
104   const DominatorTree *DT;
105 
106   // Unlike the other analyses, this may be a nullptr because not all clients
107   // provide it currently.
108   OptimizationRemarkEmitter *ORE;
109 
110   /// Set of assumptions that should be excluded from further queries.
111   /// This is because of the potential for mutual recursion to cause
112   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
113   /// classic case of this is assume(x = y), which will attempt to determine
114   /// bits in x from bits in y, which will attempt to determine bits in y from
115   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
116   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
117   /// (all of which can call computeKnownBits), and so on.
118   std::array<const Value *, MaxAnalysisRecursionDepth> Excluded;
119 
120   /// If true, it is safe to use metadata during simplification.
121   InstrInfoQuery IIQ;
122 
123   unsigned NumExcluded = 0;
124 
125   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
126         const DominatorTree *DT, bool UseInstrInfo,
127         OptimizationRemarkEmitter *ORE = nullptr)
128       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
129 
130   Query(const Query &Q, const Value *NewExcl)
131       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
132         NumExcluded(Q.NumExcluded) {
133     Excluded = Q.Excluded;
134     Excluded[NumExcluded++] = NewExcl;
135     assert(NumExcluded <= Excluded.size());
136   }
137 
138   bool isExcluded(const Value *Value) const {
139     if (NumExcluded == 0)
140       return false;
141     auto End = Excluded.begin() + NumExcluded;
142     return std::find(Excluded.begin(), End, Value) != End;
143   }
144 };
145 
146 } // end anonymous namespace
147 
148 // Given the provided Value and, potentially, a context instruction, return
149 // the preferred context instruction (if any).
150 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
151   // If we've been provided with a context instruction, then use that (provided
152   // it has been inserted).
153   if (CxtI && CxtI->getParent())
154     return CxtI;
155 
156   // If the value is really an already-inserted instruction, then use that.
157   CxtI = dyn_cast<Instruction>(V);
158   if (CxtI && CxtI->getParent())
159     return CxtI;
160 
161   return nullptr;
162 }
163 
164 static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
165   // If we've been provided with a context instruction, then use that (provided
166   // it has been inserted).
167   if (CxtI && CxtI->getParent())
168     return CxtI;
169 
170   // If the value is really an already-inserted instruction, then use that.
171   CxtI = dyn_cast<Instruction>(V1);
172   if (CxtI && CxtI->getParent())
173     return CxtI;
174 
175   CxtI = dyn_cast<Instruction>(V2);
176   if (CxtI && CxtI->getParent())
177     return CxtI;
178 
179   return nullptr;
180 }
181 
182 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
183                                    const APInt &DemandedElts,
184                                    APInt &DemandedLHS, APInt &DemandedRHS) {
185   // The length of scalable vectors is unknown at compile time, thus we
186   // cannot check their values
187   if (isa<ScalableVectorType>(Shuf->getType()))
188     return false;
189 
190   int NumElts =
191       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
192   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
193   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
194   if (DemandedElts.isNullValue())
195     return true;
196   // Simple case of a shuffle with zeroinitializer.
197   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
198     DemandedLHS.setBit(0);
199     return true;
200   }
201   for (int i = 0; i != NumMaskElts; ++i) {
202     if (!DemandedElts[i])
203       continue;
204     int M = Shuf->getMaskValue(i);
205     assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
206 
207     // For undef elements, we don't know anything about the common state of
208     // the shuffle result.
209     if (M == -1)
210       return false;
211     if (M < NumElts)
212       DemandedLHS.setBit(M % NumElts);
213     else
214       DemandedRHS.setBit(M % NumElts);
215   }
216 
217   return true;
218 }
219 
220 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
221                              KnownBits &Known, unsigned Depth, const Query &Q);
222 
223 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
224                              const Query &Q) {
225   // FIXME: We currently have no way to represent the DemandedElts of a scalable
226   // vector
227   if (isa<ScalableVectorType>(V->getType())) {
228     Known.resetAll();
229     return;
230   }
231 
232   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
233   APInt DemandedElts =
234       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
235   computeKnownBits(V, DemandedElts, Known, Depth, Q);
236 }
237 
238 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
239                             const DataLayout &DL, unsigned Depth,
240                             AssumptionCache *AC, const Instruction *CxtI,
241                             const DominatorTree *DT,
242                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
243   ::computeKnownBits(V, Known, Depth,
244                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
245 }
246 
247 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
248                             KnownBits &Known, const DataLayout &DL,
249                             unsigned Depth, AssumptionCache *AC,
250                             const Instruction *CxtI, const DominatorTree *DT,
251                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
252   ::computeKnownBits(V, DemandedElts, Known, Depth,
253                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
254 }
255 
256 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
257                                   unsigned Depth, const Query &Q);
258 
259 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
260                                   const Query &Q);
261 
262 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
263                                  unsigned Depth, AssumptionCache *AC,
264                                  const Instruction *CxtI,
265                                  const DominatorTree *DT,
266                                  OptimizationRemarkEmitter *ORE,
267                                  bool UseInstrInfo) {
268   return ::computeKnownBits(
269       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
270 }
271 
272 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
273                                  const DataLayout &DL, unsigned Depth,
274                                  AssumptionCache *AC, const Instruction *CxtI,
275                                  const DominatorTree *DT,
276                                  OptimizationRemarkEmitter *ORE,
277                                  bool UseInstrInfo) {
278   return ::computeKnownBits(
279       V, DemandedElts, Depth,
280       Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
281 }
282 
283 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
284                                const DataLayout &DL, AssumptionCache *AC,
285                                const Instruction *CxtI, const DominatorTree *DT,
286                                bool UseInstrInfo) {
287   assert(LHS->getType() == RHS->getType() &&
288          "LHS and RHS should have the same type");
289   assert(LHS->getType()->isIntOrIntVectorTy() &&
290          "LHS and RHS should be integers");
291   // Look for an inverted mask: (X & ~M) op (Y & M).
292   Value *M;
293   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
294       match(RHS, m_c_And(m_Specific(M), m_Value())))
295     return true;
296   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
297       match(LHS, m_c_And(m_Specific(M), m_Value())))
298     return true;
299   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
300   KnownBits LHSKnown(IT->getBitWidth());
301   KnownBits RHSKnown(IT->getBitWidth());
302   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
303   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
304   return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
305 }
306 
307 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
308   for (const User *U : CxtI->users()) {
309     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
310       if (IC->isEquality())
311         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
312           if (C->isNullValue())
313             continue;
314     return false;
315   }
316   return true;
317 }
318 
319 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
320                                    const Query &Q);
321 
322 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
323                                   bool OrZero, unsigned Depth,
324                                   AssumptionCache *AC, const Instruction *CxtI,
325                                   const DominatorTree *DT, bool UseInstrInfo) {
326   return ::isKnownToBeAPowerOfTwo(
327       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
328 }
329 
330 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
331                            unsigned Depth, const Query &Q);
332 
333 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
334 
335 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
336                           AssumptionCache *AC, const Instruction *CxtI,
337                           const DominatorTree *DT, bool UseInstrInfo) {
338   return ::isKnownNonZero(V, Depth,
339                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
340 }
341 
342 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
343                               unsigned Depth, AssumptionCache *AC,
344                               const Instruction *CxtI, const DominatorTree *DT,
345                               bool UseInstrInfo) {
346   KnownBits Known =
347       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
348   return Known.isNonNegative();
349 }
350 
351 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
352                            AssumptionCache *AC, const Instruction *CxtI,
353                            const DominatorTree *DT, bool UseInstrInfo) {
354   if (auto *CI = dyn_cast<ConstantInt>(V))
355     return CI->getValue().isStrictlyPositive();
356 
357   // TODO: We'd doing two recursive queries here.  We should factor this such
358   // that only a single query is needed.
359   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
360          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
361 }
362 
363 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
364                            AssumptionCache *AC, const Instruction *CxtI,
365                            const DominatorTree *DT, bool UseInstrInfo) {
366   KnownBits Known =
367       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
368   return Known.isNegative();
369 }
370 
371 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
372                             const Query &Q);
373 
374 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
375                            const DataLayout &DL, AssumptionCache *AC,
376                            const Instruction *CxtI, const DominatorTree *DT,
377                            bool UseInstrInfo) {
378   return ::isKnownNonEqual(V1, V2, 0,
379                            Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
380                                  UseInstrInfo, /*ORE=*/nullptr));
381 }
382 
383 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
384                               const Query &Q);
385 
386 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
387                              const DataLayout &DL, unsigned Depth,
388                              AssumptionCache *AC, const Instruction *CxtI,
389                              const DominatorTree *DT, bool UseInstrInfo) {
390   return ::MaskedValueIsZero(
391       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
392 }
393 
394 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
395                                    unsigned Depth, const Query &Q);
396 
397 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
398                                    const Query &Q) {
399   // FIXME: We currently have no way to represent the DemandedElts of a scalable
400   // vector
401   if (isa<ScalableVectorType>(V->getType()))
402     return 1;
403 
404   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
405   APInt DemandedElts =
406       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
407   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
408 }
409 
410 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
411                                   unsigned Depth, AssumptionCache *AC,
412                                   const Instruction *CxtI,
413                                   const DominatorTree *DT, bool UseInstrInfo) {
414   return ::ComputeNumSignBits(
415       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
416 }
417 
418 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
419                                    bool NSW, const APInt &DemandedElts,
420                                    KnownBits &KnownOut, KnownBits &Known2,
421                                    unsigned Depth, const Query &Q) {
422   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
423 
424   // If one operand is unknown and we have no nowrap information,
425   // the result will be unknown independently of the second operand.
426   if (KnownOut.isUnknown() && !NSW)
427     return;
428 
429   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
430   KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
431 }
432 
433 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
434                                 const APInt &DemandedElts, KnownBits &Known,
435                                 KnownBits &Known2, unsigned Depth,
436                                 const Query &Q) {
437   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
438   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
439 
440   bool isKnownNegative = false;
441   bool isKnownNonNegative = false;
442   // If the multiplication is known not to overflow, compute the sign bit.
443   if (NSW) {
444     if (Op0 == Op1) {
445       // The product of a number with itself is non-negative.
446       isKnownNonNegative = true;
447     } else {
448       bool isKnownNonNegativeOp1 = Known.isNonNegative();
449       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
450       bool isKnownNegativeOp1 = Known.isNegative();
451       bool isKnownNegativeOp0 = Known2.isNegative();
452       // The product of two numbers with the same sign is non-negative.
453       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
454                            (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
455       // The product of a negative number and a non-negative number is either
456       // negative or zero.
457       if (!isKnownNonNegative)
458         isKnownNegative =
459             (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
460              Known2.isNonZero()) ||
461             (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
462     }
463   }
464 
465   Known = KnownBits::computeForMul(Known, Known2);
466 
467   // Only make use of no-wrap flags if we failed to compute the sign bit
468   // directly.  This matters if the multiplication always overflows, in
469   // which case we prefer to follow the result of the direct computation,
470   // though as the program is invoking undefined behaviour we can choose
471   // whatever we like here.
472   if (isKnownNonNegative && !Known.isNegative())
473     Known.makeNonNegative();
474   else if (isKnownNegative && !Known.isNonNegative())
475     Known.makeNegative();
476 }
477 
478 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
479                                              KnownBits &Known) {
480   unsigned BitWidth = Known.getBitWidth();
481   unsigned NumRanges = Ranges.getNumOperands() / 2;
482   assert(NumRanges >= 1);
483 
484   Known.Zero.setAllBits();
485   Known.One.setAllBits();
486 
487   for (unsigned i = 0; i < NumRanges; ++i) {
488     ConstantInt *Lower =
489         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
490     ConstantInt *Upper =
491         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
492     ConstantRange Range(Lower->getValue(), Upper->getValue());
493 
494     // The first CommonPrefixBits of all values in Range are equal.
495     unsigned CommonPrefixBits =
496         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
497     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
498     APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
499     Known.One &= UnsignedMax & Mask;
500     Known.Zero &= ~UnsignedMax & Mask;
501   }
502 }
503 
504 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
505   SmallVector<const Value *, 16> WorkSet(1, I);
506   SmallPtrSet<const Value *, 32> Visited;
507   SmallPtrSet<const Value *, 16> EphValues;
508 
509   // The instruction defining an assumption's condition itself is always
510   // considered ephemeral to that assumption (even if it has other
511   // non-ephemeral users). See r246696's test case for an example.
512   if (is_contained(I->operands(), E))
513     return true;
514 
515   while (!WorkSet.empty()) {
516     const Value *V = WorkSet.pop_back_val();
517     if (!Visited.insert(V).second)
518       continue;
519 
520     // If all uses of this value are ephemeral, then so is this value.
521     if (llvm::all_of(V->users(), [&](const User *U) {
522                                    return EphValues.count(U);
523                                  })) {
524       if (V == E)
525         return true;
526 
527       if (V == I || isSafeToSpeculativelyExecute(V)) {
528        EphValues.insert(V);
529        if (const User *U = dyn_cast<User>(V))
530          append_range(WorkSet, U->operands());
531       }
532     }
533   }
534 
535   return false;
536 }
537 
538 // Is this an intrinsic that cannot be speculated but also cannot trap?
539 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
540   if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
541     return CI->isAssumeLikeIntrinsic();
542 
543   return false;
544 }
545 
546 bool llvm::isValidAssumeForContext(const Instruction *Inv,
547                                    const Instruction *CxtI,
548                                    const DominatorTree *DT) {
549   // There are two restrictions on the use of an assume:
550   //  1. The assume must dominate the context (or the control flow must
551   //     reach the assume whenever it reaches the context).
552   //  2. The context must not be in the assume's set of ephemeral values
553   //     (otherwise we will use the assume to prove that the condition
554   //     feeding the assume is trivially true, thus causing the removal of
555   //     the assume).
556 
557   if (Inv->getParent() == CxtI->getParent()) {
558     // If Inv and CtxI are in the same block, check if the assume (Inv) is first
559     // in the BB.
560     if (Inv->comesBefore(CxtI))
561       return true;
562 
563     // Don't let an assume affect itself - this would cause the problems
564     // `isEphemeralValueOf` is trying to prevent, and it would also make
565     // the loop below go out of bounds.
566     if (Inv == CxtI)
567       return false;
568 
569     // The context comes first, but they're both in the same block.
570     // Make sure there is nothing in between that might interrupt
571     // the control flow, not even CxtI itself.
572     // We limit the scan distance between the assume and its context instruction
573     // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
574     // it can be adjusted if needed (could be turned into a cl::opt).
575     unsigned ScanLimit = 15;
576     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
577       if (!isGuaranteedToTransferExecutionToSuccessor(&*I) || --ScanLimit == 0)
578         return false;
579 
580     return !isEphemeralValueOf(Inv, CxtI);
581   }
582 
583   // Inv and CxtI are in different blocks.
584   if (DT) {
585     if (DT->dominates(Inv, CxtI))
586       return true;
587   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
588     // We don't have a DT, but this trivially dominates.
589     return true;
590   }
591 
592   return false;
593 }
594 
595 static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
596   // v u> y implies v != 0.
597   if (Pred == ICmpInst::ICMP_UGT)
598     return true;
599 
600   // Special-case v != 0 to also handle v != null.
601   if (Pred == ICmpInst::ICMP_NE)
602     return match(RHS, m_Zero());
603 
604   // All other predicates - rely on generic ConstantRange handling.
605   const APInt *C;
606   if (!match(RHS, m_APInt(C)))
607     return false;
608 
609   ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
610   return !TrueValues.contains(APInt::getNullValue(C->getBitWidth()));
611 }
612 
613 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
614   // Use of assumptions is context-sensitive. If we don't have a context, we
615   // cannot use them!
616   if (!Q.AC || !Q.CxtI)
617     return false;
618 
619   if (Q.CxtI && V->getType()->isPointerTy()) {
620     SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
621     if (!NullPointerIsDefined(Q.CxtI->getFunction(),
622                               V->getType()->getPointerAddressSpace()))
623       AttrKinds.push_back(Attribute::Dereferenceable);
624 
625     if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
626       return true;
627   }
628 
629   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
630     if (!AssumeVH)
631       continue;
632     CallInst *I = cast<CallInst>(AssumeVH);
633     assert(I->getFunction() == Q.CxtI->getFunction() &&
634            "Got assumption for the wrong function!");
635     if (Q.isExcluded(I))
636       continue;
637 
638     // Warning: This loop can end up being somewhat performance sensitive.
639     // We're running this loop for once for each value queried resulting in a
640     // runtime of ~O(#assumes * #values).
641 
642     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
643            "must be an assume intrinsic");
644 
645     Value *RHS;
646     CmpInst::Predicate Pred;
647     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
648     if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
649       return false;
650 
651     if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
652       return true;
653   }
654 
655   return false;
656 }
657 
658 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
659                                        unsigned Depth, const Query &Q) {
660   // Use of assumptions is context-sensitive. If we don't have a context, we
661   // cannot use them!
662   if (!Q.AC || !Q.CxtI)
663     return;
664 
665   unsigned BitWidth = Known.getBitWidth();
666 
667   // Refine Known set if the pointer alignment is set by assume bundles.
668   if (V->getType()->isPointerTy()) {
669     if (RetainedKnowledge RK = getKnowledgeValidInContext(
670             V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
671       Known.Zero.setLowBits(Log2_32(RK.ArgValue));
672     }
673   }
674 
675   // Note that the patterns below need to be kept in sync with the code
676   // in AssumptionCache::updateAffectedValues.
677 
678   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
679     if (!AssumeVH)
680       continue;
681     CallInst *I = cast<CallInst>(AssumeVH);
682     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
683            "Got assumption for the wrong function!");
684     if (Q.isExcluded(I))
685       continue;
686 
687     // Warning: This loop can end up being somewhat performance sensitive.
688     // We're running this loop for once for each value queried resulting in a
689     // runtime of ~O(#assumes * #values).
690 
691     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
692            "must be an assume intrinsic");
693 
694     Value *Arg = I->getArgOperand(0);
695 
696     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
697       assert(BitWidth == 1 && "assume operand is not i1?");
698       Known.setAllOnes();
699       return;
700     }
701     if (match(Arg, m_Not(m_Specific(V))) &&
702         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
703       assert(BitWidth == 1 && "assume operand is not i1?");
704       Known.setAllZero();
705       return;
706     }
707 
708     // The remaining tests are all recursive, so bail out if we hit the limit.
709     if (Depth == MaxAnalysisRecursionDepth)
710       continue;
711 
712     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
713     if (!Cmp)
714       continue;
715 
716     // Note that ptrtoint may change the bitwidth.
717     Value *A, *B;
718     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
719 
720     CmpInst::Predicate Pred;
721     uint64_t C;
722     switch (Cmp->getPredicate()) {
723     default:
724       break;
725     case ICmpInst::ICMP_EQ:
726       // assume(v = a)
727       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
728           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
729         KnownBits RHSKnown =
730             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
731         Known.Zero |= RHSKnown.Zero;
732         Known.One  |= RHSKnown.One;
733       // assume(v & b = a)
734       } else if (match(Cmp,
735                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
736                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
737         KnownBits RHSKnown =
738             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
739         KnownBits MaskKnown =
740             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
741 
742         // For those bits in the mask that are known to be one, we can propagate
743         // known bits from the RHS to V.
744         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
745         Known.One  |= RHSKnown.One  & MaskKnown.One;
746       // assume(~(v & b) = a)
747       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
748                                      m_Value(A))) &&
749                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
750         KnownBits RHSKnown =
751             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
752         KnownBits MaskKnown =
753             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
754 
755         // For those bits in the mask that are known to be one, we can propagate
756         // inverted known bits from the RHS to V.
757         Known.Zero |= RHSKnown.One  & MaskKnown.One;
758         Known.One  |= RHSKnown.Zero & MaskKnown.One;
759       // assume(v | b = a)
760       } else if (match(Cmp,
761                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
762                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
763         KnownBits RHSKnown =
764             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
765         KnownBits BKnown =
766             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
767 
768         // For those bits in B that are known to be zero, we can propagate known
769         // bits from the RHS to V.
770         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
771         Known.One  |= RHSKnown.One  & BKnown.Zero;
772       // assume(~(v | b) = a)
773       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
774                                      m_Value(A))) &&
775                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
776         KnownBits RHSKnown =
777             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
778         KnownBits BKnown =
779             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
780 
781         // For those bits in B that are known to be zero, we can propagate
782         // inverted known bits from the RHS to V.
783         Known.Zero |= RHSKnown.One  & BKnown.Zero;
784         Known.One  |= RHSKnown.Zero & BKnown.Zero;
785       // assume(v ^ b = a)
786       } else if (match(Cmp,
787                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
788                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
789         KnownBits RHSKnown =
790             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
791         KnownBits BKnown =
792             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
793 
794         // For those bits in B that are known to be zero, we can propagate known
795         // bits from the RHS to V. For those bits in B that are known to be one,
796         // we can propagate inverted known bits from the RHS to V.
797         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
798         Known.One  |= RHSKnown.One  & BKnown.Zero;
799         Known.Zero |= RHSKnown.One  & BKnown.One;
800         Known.One  |= RHSKnown.Zero & BKnown.One;
801       // assume(~(v ^ b) = a)
802       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
803                                      m_Value(A))) &&
804                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
805         KnownBits RHSKnown =
806             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
807         KnownBits BKnown =
808             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
809 
810         // For those bits in B that are known to be zero, we can propagate
811         // inverted known bits from the RHS to V. For those bits in B that are
812         // known to be one, we can propagate known bits from the RHS to V.
813         Known.Zero |= RHSKnown.One  & BKnown.Zero;
814         Known.One  |= RHSKnown.Zero & BKnown.Zero;
815         Known.Zero |= RHSKnown.Zero & BKnown.One;
816         Known.One  |= RHSKnown.One  & BKnown.One;
817       // assume(v << c = a)
818       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
819                                      m_Value(A))) &&
820                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
821         KnownBits RHSKnown =
822             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
823 
824         // For those bits in RHS that are known, we can propagate them to known
825         // bits in V shifted to the right by C.
826         RHSKnown.Zero.lshrInPlace(C);
827         Known.Zero |= RHSKnown.Zero;
828         RHSKnown.One.lshrInPlace(C);
829         Known.One  |= RHSKnown.One;
830       // assume(~(v << c) = a)
831       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
832                                      m_Value(A))) &&
833                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
834         KnownBits RHSKnown =
835             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
836         // For those bits in RHS that are known, we can propagate them inverted
837         // to known bits in V shifted to the right by C.
838         RHSKnown.One.lshrInPlace(C);
839         Known.Zero |= RHSKnown.One;
840         RHSKnown.Zero.lshrInPlace(C);
841         Known.One  |= RHSKnown.Zero;
842       // assume(v >> c = a)
843       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
844                                      m_Value(A))) &&
845                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
846         KnownBits RHSKnown =
847             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
848         // For those bits in RHS that are known, we can propagate them to known
849         // bits in V shifted to the right by C.
850         Known.Zero |= RHSKnown.Zero << C;
851         Known.One  |= RHSKnown.One  << C;
852       // assume(~(v >> c) = a)
853       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
854                                      m_Value(A))) &&
855                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
856         KnownBits RHSKnown =
857             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
858         // For those bits in RHS that are known, we can propagate them inverted
859         // to known bits in V shifted to the right by C.
860         Known.Zero |= RHSKnown.One  << C;
861         Known.One  |= RHSKnown.Zero << C;
862       }
863       break;
864     case ICmpInst::ICMP_SGE:
865       // assume(v >=_s c) where c is non-negative
866       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
867           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
868         KnownBits RHSKnown =
869             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
870 
871         if (RHSKnown.isNonNegative()) {
872           // We know that the sign bit is zero.
873           Known.makeNonNegative();
874         }
875       }
876       break;
877     case ICmpInst::ICMP_SGT:
878       // assume(v >_s c) where c is at least -1.
879       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
880           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
881         KnownBits RHSKnown =
882             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
883 
884         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
885           // We know that the sign bit is zero.
886           Known.makeNonNegative();
887         }
888       }
889       break;
890     case ICmpInst::ICMP_SLE:
891       // assume(v <=_s c) where c is negative
892       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
893           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
894         KnownBits RHSKnown =
895             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
896 
897         if (RHSKnown.isNegative()) {
898           // We know that the sign bit is one.
899           Known.makeNegative();
900         }
901       }
902       break;
903     case ICmpInst::ICMP_SLT:
904       // assume(v <_s c) where c is non-positive
905       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
906           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
907         KnownBits RHSKnown =
908             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
909 
910         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
911           // We know that the sign bit is one.
912           Known.makeNegative();
913         }
914       }
915       break;
916     case ICmpInst::ICMP_ULE:
917       // assume(v <=_u c)
918       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
919           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
920         KnownBits RHSKnown =
921             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
922 
923         // Whatever high bits in c are zero are known to be zero.
924         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
925       }
926       break;
927     case ICmpInst::ICMP_ULT:
928       // assume(v <_u c)
929       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
930           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
931         KnownBits RHSKnown =
932             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
933 
934         // If the RHS is known zero, then this assumption must be wrong (nothing
935         // is unsigned less than zero). Signal a conflict and get out of here.
936         if (RHSKnown.isZero()) {
937           Known.Zero.setAllBits();
938           Known.One.setAllBits();
939           break;
940         }
941 
942         // Whatever high bits in c are zero are known to be zero (if c is a power
943         // of 2, then one more).
944         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
945           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
946         else
947           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
948       }
949       break;
950     }
951   }
952 
953   // If assumptions conflict with each other or previous known bits, then we
954   // have a logical fallacy. It's possible that the assumption is not reachable,
955   // so this isn't a real bug. On the other hand, the program may have undefined
956   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
957   // clear out the known bits, try to warn the user, and hope for the best.
958   if (Known.Zero.intersects(Known.One)) {
959     Known.resetAll();
960 
961     if (Q.ORE)
962       Q.ORE->emit([&]() {
963         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
964         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
965                                           CxtI)
966                << "Detected conflicting code assumptions. Program may "
967                   "have undefined behavior, or compiler may have "
968                   "internal error.";
969       });
970   }
971 }
972 
973 /// Compute known bits from a shift operator, including those with a
974 /// non-constant shift amount. Known is the output of this function. Known2 is a
975 /// pre-allocated temporary with the same bit width as Known and on return
976 /// contains the known bit of the shift value source. KF is an
977 /// operator-specific function that, given the known-bits and a shift amount,
978 /// compute the implied known-bits of the shift operator's result respectively
979 /// for that shift amount. The results from calling KF are conservatively
980 /// combined for all permitted shift amounts.
981 static void computeKnownBitsFromShiftOperator(
982     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
983     KnownBits &Known2, unsigned Depth, const Query &Q,
984     function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
985   unsigned BitWidth = Known.getBitWidth();
986   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
987   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
988 
989   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
990   // BitWidth > 64 and any upper bits are known, we'll end up returning the
991   // limit value (which implies all bits are known).
992   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
993   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
994   bool ShiftAmtIsConstant = Known.isConstant();
995   bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
996 
997   if (ShiftAmtIsConstant) {
998     Known = KF(Known2, Known);
999 
1000     // If the known bits conflict, this must be an overflowing left shift, so
1001     // the shift result is poison. We can return anything we want. Choose 0 for
1002     // the best folding opportunity.
1003     if (Known.hasConflict())
1004       Known.setAllZero();
1005 
1006     return;
1007   }
1008 
1009   // If the shift amount could be greater than or equal to the bit-width of the
1010   // LHS, the value could be poison, but bail out because the check below is
1011   // expensive.
1012   // TODO: Should we just carry on?
1013   if (MaxShiftAmtIsOutOfRange) {
1014     Known.resetAll();
1015     return;
1016   }
1017 
1018   // It would be more-clearly correct to use the two temporaries for this
1019   // calculation. Reusing the APInts here to prevent unnecessary allocations.
1020   Known.resetAll();
1021 
1022   // If we know the shifter operand is nonzero, we can sometimes infer more
1023   // known bits. However this is expensive to compute, so be lazy about it and
1024   // only compute it when absolutely necessary.
1025   Optional<bool> ShifterOperandIsNonZero;
1026 
1027   // Early exit if we can't constrain any well-defined shift amount.
1028   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1029       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1030     ShifterOperandIsNonZero =
1031         isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1032     if (!*ShifterOperandIsNonZero)
1033       return;
1034   }
1035 
1036   Known.Zero.setAllBits();
1037   Known.One.setAllBits();
1038   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1039     // Combine the shifted known input bits only for those shift amounts
1040     // compatible with its known constraints.
1041     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1042       continue;
1043     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1044       continue;
1045     // If we know the shifter is nonzero, we may be able to infer more known
1046     // bits. This check is sunk down as far as possible to avoid the expensive
1047     // call to isKnownNonZero if the cheaper checks above fail.
1048     if (ShiftAmt == 0) {
1049       if (!ShifterOperandIsNonZero.hasValue())
1050         ShifterOperandIsNonZero =
1051             isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1052       if (*ShifterOperandIsNonZero)
1053         continue;
1054     }
1055 
1056     Known = KnownBits::commonBits(
1057         Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1058   }
1059 
1060   // If the known bits conflict, the result is poison. Return a 0 and hope the
1061   // caller can further optimize that.
1062   if (Known.hasConflict())
1063     Known.setAllZero();
1064 }
1065 
1066 static void computeKnownBitsFromOperator(const Operator *I,
1067                                          const APInt &DemandedElts,
1068                                          KnownBits &Known, unsigned Depth,
1069                                          const Query &Q) {
1070   unsigned BitWidth = Known.getBitWidth();
1071 
1072   KnownBits Known2(BitWidth);
1073   switch (I->getOpcode()) {
1074   default: break;
1075   case Instruction::Load:
1076     if (MDNode *MD =
1077             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1078       computeKnownBitsFromRangeMetadata(*MD, Known);
1079     break;
1080   case Instruction::And: {
1081     // If either the LHS or the RHS are Zero, the result is zero.
1082     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1083     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1084 
1085     Known &= Known2;
1086 
1087     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1088     // here we handle the more general case of adding any odd number by
1089     // matching the form add(x, add(x, y)) where y is odd.
1090     // TODO: This could be generalized to clearing any bit set in y where the
1091     // following bit is known to be unset in y.
1092     Value *X = nullptr, *Y = nullptr;
1093     if (!Known.Zero[0] && !Known.One[0] &&
1094         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1095       Known2.resetAll();
1096       computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1097       if (Known2.countMinTrailingOnes() > 0)
1098         Known.Zero.setBit(0);
1099     }
1100     break;
1101   }
1102   case Instruction::Or:
1103     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1104     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1105 
1106     Known |= Known2;
1107     break;
1108   case Instruction::Xor:
1109     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1110     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1111 
1112     Known ^= Known2;
1113     break;
1114   case Instruction::Mul: {
1115     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1116     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1117                         Known, Known2, Depth, Q);
1118     break;
1119   }
1120   case Instruction::UDiv: {
1121     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1122     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1123     Known = KnownBits::udiv(Known, Known2);
1124     break;
1125   }
1126   case Instruction::Select: {
1127     const Value *LHS = nullptr, *RHS = nullptr;
1128     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1129     if (SelectPatternResult::isMinOrMax(SPF)) {
1130       computeKnownBits(RHS, Known, Depth + 1, Q);
1131       computeKnownBits(LHS, Known2, Depth + 1, Q);
1132       switch (SPF) {
1133       default:
1134         llvm_unreachable("Unhandled select pattern flavor!");
1135       case SPF_SMAX:
1136         Known = KnownBits::smax(Known, Known2);
1137         break;
1138       case SPF_SMIN:
1139         Known = KnownBits::smin(Known, Known2);
1140         break;
1141       case SPF_UMAX:
1142         Known = KnownBits::umax(Known, Known2);
1143         break;
1144       case SPF_UMIN:
1145         Known = KnownBits::umin(Known, Known2);
1146         break;
1147       }
1148       break;
1149     }
1150 
1151     computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1152     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1153 
1154     // Only known if known in both the LHS and RHS.
1155     Known = KnownBits::commonBits(Known, Known2);
1156 
1157     if (SPF == SPF_ABS) {
1158       // RHS from matchSelectPattern returns the negation part of abs pattern.
1159       // If the negate has an NSW flag we can assume the sign bit of the result
1160       // will be 0 because that makes abs(INT_MIN) undefined.
1161       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1162           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1163         Known.Zero.setSignBit();
1164     }
1165 
1166     break;
1167   }
1168   case Instruction::FPTrunc:
1169   case Instruction::FPExt:
1170   case Instruction::FPToUI:
1171   case Instruction::FPToSI:
1172   case Instruction::SIToFP:
1173   case Instruction::UIToFP:
1174     break; // Can't work with floating point.
1175   case Instruction::PtrToInt:
1176   case Instruction::IntToPtr:
1177     // Fall through and handle them the same as zext/trunc.
1178     LLVM_FALLTHROUGH;
1179   case Instruction::ZExt:
1180   case Instruction::Trunc: {
1181     Type *SrcTy = I->getOperand(0)->getType();
1182 
1183     unsigned SrcBitWidth;
1184     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1185     // which fall through here.
1186     Type *ScalarTy = SrcTy->getScalarType();
1187     SrcBitWidth = ScalarTy->isPointerTy() ?
1188       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1189       Q.DL.getTypeSizeInBits(ScalarTy);
1190 
1191     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1192     Known = Known.anyextOrTrunc(SrcBitWidth);
1193     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1194     Known = Known.zextOrTrunc(BitWidth);
1195     break;
1196   }
1197   case Instruction::BitCast: {
1198     Type *SrcTy = I->getOperand(0)->getType();
1199     if (SrcTy->isIntOrPtrTy() &&
1200         // TODO: For now, not handling conversions like:
1201         // (bitcast i64 %x to <2 x i32>)
1202         !I->getType()->isVectorTy()) {
1203       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1204       break;
1205     }
1206     break;
1207   }
1208   case Instruction::SExt: {
1209     // Compute the bits in the result that are not present in the input.
1210     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1211 
1212     Known = Known.trunc(SrcBitWidth);
1213     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1214     // If the sign bit of the input is known set or clear, then we know the
1215     // top bits of the result.
1216     Known = Known.sext(BitWidth);
1217     break;
1218   }
1219   case Instruction::Shl: {
1220     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1221     auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1222       KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1223       // If this shift has "nsw" keyword, then the result is either a poison
1224       // value or has the same sign bit as the first operand.
1225       if (NSW) {
1226         if (KnownVal.Zero.isSignBitSet())
1227           Result.Zero.setSignBit();
1228         if (KnownVal.One.isSignBitSet())
1229           Result.One.setSignBit();
1230       }
1231       return Result;
1232     };
1233     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1234                                       KF);
1235     // Trailing zeros of a right-shifted constant never decrease.
1236     const APInt *C;
1237     if (match(I->getOperand(0), m_APInt(C)))
1238       Known.Zero.setLowBits(C->countTrailingZeros());
1239     break;
1240   }
1241   case Instruction::LShr: {
1242     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1243       return KnownBits::lshr(KnownVal, KnownAmt);
1244     };
1245     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1246                                       KF);
1247     // Leading zeros of a left-shifted constant never decrease.
1248     const APInt *C;
1249     if (match(I->getOperand(0), m_APInt(C)))
1250       Known.Zero.setHighBits(C->countLeadingZeros());
1251     break;
1252   }
1253   case Instruction::AShr: {
1254     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1255       return KnownBits::ashr(KnownVal, KnownAmt);
1256     };
1257     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1258                                       KF);
1259     break;
1260   }
1261   case Instruction::Sub: {
1262     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1263     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1264                            DemandedElts, Known, Known2, Depth, Q);
1265     break;
1266   }
1267   case Instruction::Add: {
1268     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1269     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1270                            DemandedElts, Known, Known2, Depth, Q);
1271     break;
1272   }
1273   case Instruction::SRem:
1274     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1275     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1276     Known = KnownBits::srem(Known, Known2);
1277     break;
1278 
1279   case Instruction::URem:
1280     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1281     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1282     Known = KnownBits::urem(Known, Known2);
1283     break;
1284   case Instruction::Alloca:
1285     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1286     break;
1287   case Instruction::GetElementPtr: {
1288     // Analyze all of the subscripts of this getelementptr instruction
1289     // to determine if we can prove known low zero bits.
1290     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1291     // Accumulate the constant indices in a separate variable
1292     // to minimize the number of calls to computeForAddSub.
1293     APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1294 
1295     gep_type_iterator GTI = gep_type_begin(I);
1296     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1297       // TrailZ can only become smaller, short-circuit if we hit zero.
1298       if (Known.isUnknown())
1299         break;
1300 
1301       Value *Index = I->getOperand(i);
1302 
1303       // Handle case when index is zero.
1304       Constant *CIndex = dyn_cast<Constant>(Index);
1305       if (CIndex && CIndex->isZeroValue())
1306         continue;
1307 
1308       if (StructType *STy = GTI.getStructTypeOrNull()) {
1309         // Handle struct member offset arithmetic.
1310 
1311         assert(CIndex &&
1312                "Access to structure field must be known at compile time");
1313 
1314         if (CIndex->getType()->isVectorTy())
1315           Index = CIndex->getSplatValue();
1316 
1317         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1318         const StructLayout *SL = Q.DL.getStructLayout(STy);
1319         uint64_t Offset = SL->getElementOffset(Idx);
1320         AccConstIndices += Offset;
1321         continue;
1322       }
1323 
1324       // Handle array index arithmetic.
1325       Type *IndexedTy = GTI.getIndexedType();
1326       if (!IndexedTy->isSized()) {
1327         Known.resetAll();
1328         break;
1329       }
1330 
1331       unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1332       KnownBits IndexBits(IndexBitWidth);
1333       computeKnownBits(Index, IndexBits, Depth + 1, Q);
1334       TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1335       uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1336       KnownBits ScalingFactor(IndexBitWidth);
1337       // Multiply by current sizeof type.
1338       // &A[i] == A + i * sizeof(*A[i]).
1339       if (IndexTypeSize.isScalable()) {
1340         // For scalable types the only thing we know about sizeof is
1341         // that this is a multiple of the minimum size.
1342         ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1343       } else if (IndexBits.isConstant()) {
1344         APInt IndexConst = IndexBits.getConstant();
1345         APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1346         IndexConst *= ScalingFactor;
1347         AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1348         continue;
1349       } else {
1350         ScalingFactor =
1351             KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1352       }
1353       IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor);
1354 
1355       // If the offsets have a different width from the pointer, according
1356       // to the language reference we need to sign-extend or truncate them
1357       // to the width of the pointer.
1358       IndexBits = IndexBits.sextOrTrunc(BitWidth);
1359 
1360       // Note that inbounds does *not* guarantee nsw for the addition, as only
1361       // the offset is signed, while the base address is unsigned.
1362       Known = KnownBits::computeForAddSub(
1363           /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1364     }
1365     if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
1366       KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1367       Known = KnownBits::computeForAddSub(
1368           /*Add=*/true, /*NSW=*/false, Known, Index);
1369     }
1370     break;
1371   }
1372   case Instruction::PHI: {
1373     const PHINode *P = cast<PHINode>(I);
1374     BinaryOperator *BO = nullptr;
1375     Value *R = nullptr, *L = nullptr;
1376     if (matchSimpleRecurrence(P, BO, R, L)) {
1377       // Handle the case of a simple two-predecessor recurrence PHI.
1378       // There's a lot more that could theoretically be done here, but
1379       // this is sufficient to catch some interesting cases.
1380       unsigned Opcode = BO->getOpcode();
1381 
1382       // If this is a shift recurrence, we know the bits being shifted in.
1383       // We can combine that with information about the start value of the
1384       // recurrence to conclude facts about the result.
1385       if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1386            Opcode == Instruction::Shl) &&
1387           BO->getOperand(0) == I) {
1388 
1389         // We have matched a recurrence of the form:
1390         // %iv = [R, %entry], [%iv.next, %backedge]
1391         // %iv.next = shift_op %iv, L
1392 
1393         // Recurse with the phi context to avoid concern about whether facts
1394         // inferred hold at original context instruction.  TODO: It may be
1395         // correct to use the original context.  IF warranted, explore and
1396         // add sufficient tests to cover.
1397         Query RecQ = Q;
1398         RecQ.CxtI = P;
1399         computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1400         switch (Opcode) {
1401         case Instruction::Shl:
1402           // A shl recurrence will only increase the tailing zeros
1403           Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1404           break;
1405         case Instruction::LShr:
1406           // A lshr recurrence will preserve the leading zeros of the
1407           // start value
1408           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1409           break;
1410         case Instruction::AShr:
1411           // An ashr recurrence will extend the initial sign bit
1412           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1413           Known.One.setHighBits(Known2.countMinLeadingOnes());
1414           break;
1415         };
1416       }
1417 
1418       // Check for operations that have the property that if
1419       // both their operands have low zero bits, the result
1420       // will have low zero bits.
1421       if (Opcode == Instruction::Add ||
1422           Opcode == Instruction::Sub ||
1423           Opcode == Instruction::And ||
1424           Opcode == Instruction::Or ||
1425           Opcode == Instruction::Mul) {
1426         // Change the context instruction to the "edge" that flows into the
1427         // phi. This is important because that is where the value is actually
1428         // "evaluated" even though it is used later somewhere else. (see also
1429         // D69571).
1430         Query RecQ = Q;
1431 
1432         unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1433         Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1434         Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1435 
1436         // Ok, we have a PHI of the form L op= R. Check for low
1437         // zero bits.
1438         RecQ.CxtI = RInst;
1439         computeKnownBits(R, Known2, Depth + 1, RecQ);
1440 
1441         // We need to take the minimum number of known bits
1442         KnownBits Known3(BitWidth);
1443         RecQ.CxtI = LInst;
1444         computeKnownBits(L, Known3, Depth + 1, RecQ);
1445 
1446         Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1447                                        Known3.countMinTrailingZeros()));
1448 
1449         auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1450         if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1451           // If initial value of recurrence is nonnegative, and we are adding
1452           // a nonnegative number with nsw, the result can only be nonnegative
1453           // or poison value regardless of the number of times we execute the
1454           // add in phi recurrence. If initial value is negative and we are
1455           // adding a negative number with nsw, the result can only be
1456           // negative or poison value. Similar arguments apply to sub and mul.
1457           //
1458           // (add non-negative, non-negative) --> non-negative
1459           // (add negative, negative) --> negative
1460           if (Opcode == Instruction::Add) {
1461             if (Known2.isNonNegative() && Known3.isNonNegative())
1462               Known.makeNonNegative();
1463             else if (Known2.isNegative() && Known3.isNegative())
1464               Known.makeNegative();
1465           }
1466 
1467           // (sub nsw non-negative, negative) --> non-negative
1468           // (sub nsw negative, non-negative) --> negative
1469           else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1470             if (Known2.isNonNegative() && Known3.isNegative())
1471               Known.makeNonNegative();
1472             else if (Known2.isNegative() && Known3.isNonNegative())
1473               Known.makeNegative();
1474           }
1475 
1476           // (mul nsw non-negative, non-negative) --> non-negative
1477           else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1478                    Known3.isNonNegative())
1479             Known.makeNonNegative();
1480         }
1481 
1482         break;
1483       }
1484     }
1485 
1486     // Unreachable blocks may have zero-operand PHI nodes.
1487     if (P->getNumIncomingValues() == 0)
1488       break;
1489 
1490     // Otherwise take the unions of the known bit sets of the operands,
1491     // taking conservative care to avoid excessive recursion.
1492     if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1493       // Skip if every incoming value references to ourself.
1494       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1495         break;
1496 
1497       Known.Zero.setAllBits();
1498       Known.One.setAllBits();
1499       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1500         Value *IncValue = P->getIncomingValue(u);
1501         // Skip direct self references.
1502         if (IncValue == P) continue;
1503 
1504         // Change the context instruction to the "edge" that flows into the
1505         // phi. This is important because that is where the value is actually
1506         // "evaluated" even though it is used later somewhere else. (see also
1507         // D69571).
1508         Query RecQ = Q;
1509         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1510 
1511         Known2 = KnownBits(BitWidth);
1512         // Recurse, but cap the recursion to one level, because we don't
1513         // want to waste time spinning around in loops.
1514         computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1515         Known = KnownBits::commonBits(Known, Known2);
1516         // If all bits have been ruled out, there's no need to check
1517         // more operands.
1518         if (Known.isUnknown())
1519           break;
1520       }
1521     }
1522     break;
1523   }
1524   case Instruction::Call:
1525   case Instruction::Invoke:
1526     // If range metadata is attached to this call, set known bits from that,
1527     // and then intersect with known bits based on other properties of the
1528     // function.
1529     if (MDNode *MD =
1530             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1531       computeKnownBitsFromRangeMetadata(*MD, Known);
1532     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1533       computeKnownBits(RV, Known2, Depth + 1, Q);
1534       Known.Zero |= Known2.Zero;
1535       Known.One |= Known2.One;
1536     }
1537     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1538       switch (II->getIntrinsicID()) {
1539       default: break;
1540       case Intrinsic::abs: {
1541         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1542         bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1543         Known = Known2.abs(IntMinIsPoison);
1544         break;
1545       }
1546       case Intrinsic::bitreverse:
1547         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1548         Known.Zero |= Known2.Zero.reverseBits();
1549         Known.One |= Known2.One.reverseBits();
1550         break;
1551       case Intrinsic::bswap:
1552         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1553         Known.Zero |= Known2.Zero.byteSwap();
1554         Known.One |= Known2.One.byteSwap();
1555         break;
1556       case Intrinsic::ctlz: {
1557         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1558         // If we have a known 1, its position is our upper bound.
1559         unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1560         // If this call is undefined for 0, the result will be less than 2^n.
1561         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1562           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1563         unsigned LowBits = Log2_32(PossibleLZ)+1;
1564         Known.Zero.setBitsFrom(LowBits);
1565         break;
1566       }
1567       case Intrinsic::cttz: {
1568         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1569         // If we have a known 1, its position is our upper bound.
1570         unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1571         // If this call is undefined for 0, the result will be less than 2^n.
1572         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1573           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1574         unsigned LowBits = Log2_32(PossibleTZ)+1;
1575         Known.Zero.setBitsFrom(LowBits);
1576         break;
1577       }
1578       case Intrinsic::ctpop: {
1579         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1580         // We can bound the space the count needs.  Also, bits known to be zero
1581         // can't contribute to the population.
1582         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1583         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1584         Known.Zero.setBitsFrom(LowBits);
1585         // TODO: we could bound KnownOne using the lower bound on the number
1586         // of bits which might be set provided by popcnt KnownOne2.
1587         break;
1588       }
1589       case Intrinsic::fshr:
1590       case Intrinsic::fshl: {
1591         const APInt *SA;
1592         if (!match(I->getOperand(2), m_APInt(SA)))
1593           break;
1594 
1595         // Normalize to funnel shift left.
1596         uint64_t ShiftAmt = SA->urem(BitWidth);
1597         if (II->getIntrinsicID() == Intrinsic::fshr)
1598           ShiftAmt = BitWidth - ShiftAmt;
1599 
1600         KnownBits Known3(BitWidth);
1601         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1602         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1603 
1604         Known.Zero =
1605             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1606         Known.One =
1607             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1608         break;
1609       }
1610       case Intrinsic::uadd_sat:
1611       case Intrinsic::usub_sat: {
1612         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1613         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1614         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1615 
1616         // Add: Leading ones of either operand are preserved.
1617         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1618         // as leading zeros in the result.
1619         unsigned LeadingKnown;
1620         if (IsAdd)
1621           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1622                                   Known2.countMinLeadingOnes());
1623         else
1624           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1625                                   Known2.countMinLeadingOnes());
1626 
1627         Known = KnownBits::computeForAddSub(
1628             IsAdd, /* NSW */ false, Known, Known2);
1629 
1630         // We select between the operation result and all-ones/zero
1631         // respectively, so we can preserve known ones/zeros.
1632         if (IsAdd) {
1633           Known.One.setHighBits(LeadingKnown);
1634           Known.Zero.clearAllBits();
1635         } else {
1636           Known.Zero.setHighBits(LeadingKnown);
1637           Known.One.clearAllBits();
1638         }
1639         break;
1640       }
1641       case Intrinsic::umin:
1642         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1643         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1644         Known = KnownBits::umin(Known, Known2);
1645         break;
1646       case Intrinsic::umax:
1647         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1648         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1649         Known = KnownBits::umax(Known, Known2);
1650         break;
1651       case Intrinsic::smin:
1652         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1653         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1654         Known = KnownBits::smin(Known, Known2);
1655         break;
1656       case Intrinsic::smax:
1657         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1658         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1659         Known = KnownBits::smax(Known, Known2);
1660         break;
1661       case Intrinsic::x86_sse42_crc32_64_64:
1662         Known.Zero.setBitsFrom(32);
1663         break;
1664       }
1665     }
1666     break;
1667   case Instruction::ShuffleVector: {
1668     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1669     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1670     if (!Shuf) {
1671       Known.resetAll();
1672       return;
1673     }
1674     // For undef elements, we don't know anything about the common state of
1675     // the shuffle result.
1676     APInt DemandedLHS, DemandedRHS;
1677     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1678       Known.resetAll();
1679       return;
1680     }
1681     Known.One.setAllBits();
1682     Known.Zero.setAllBits();
1683     if (!!DemandedLHS) {
1684       const Value *LHS = Shuf->getOperand(0);
1685       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1686       // If we don't know any bits, early out.
1687       if (Known.isUnknown())
1688         break;
1689     }
1690     if (!!DemandedRHS) {
1691       const Value *RHS = Shuf->getOperand(1);
1692       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1693       Known = KnownBits::commonBits(Known, Known2);
1694     }
1695     break;
1696   }
1697   case Instruction::InsertElement: {
1698     const Value *Vec = I->getOperand(0);
1699     const Value *Elt = I->getOperand(1);
1700     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1701     // Early out if the index is non-constant or out-of-range.
1702     unsigned NumElts = DemandedElts.getBitWidth();
1703     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1704       Known.resetAll();
1705       return;
1706     }
1707     Known.One.setAllBits();
1708     Known.Zero.setAllBits();
1709     unsigned EltIdx = CIdx->getZExtValue();
1710     // Do we demand the inserted element?
1711     if (DemandedElts[EltIdx]) {
1712       computeKnownBits(Elt, Known, Depth + 1, Q);
1713       // If we don't know any bits, early out.
1714       if (Known.isUnknown())
1715         break;
1716     }
1717     // We don't need the base vector element that has been inserted.
1718     APInt DemandedVecElts = DemandedElts;
1719     DemandedVecElts.clearBit(EltIdx);
1720     if (!!DemandedVecElts) {
1721       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1722       Known = KnownBits::commonBits(Known, Known2);
1723     }
1724     break;
1725   }
1726   case Instruction::ExtractElement: {
1727     // Look through extract element. If the index is non-constant or
1728     // out-of-range demand all elements, otherwise just the extracted element.
1729     const Value *Vec = I->getOperand(0);
1730     const Value *Idx = I->getOperand(1);
1731     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1732     if (isa<ScalableVectorType>(Vec->getType())) {
1733       // FIXME: there's probably *something* we can do with scalable vectors
1734       Known.resetAll();
1735       break;
1736     }
1737     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1738     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1739     if (CIdx && CIdx->getValue().ult(NumElts))
1740       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1741     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1742     break;
1743   }
1744   case Instruction::ExtractValue:
1745     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1746       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1747       if (EVI->getNumIndices() != 1) break;
1748       if (EVI->getIndices()[0] == 0) {
1749         switch (II->getIntrinsicID()) {
1750         default: break;
1751         case Intrinsic::uadd_with_overflow:
1752         case Intrinsic::sadd_with_overflow:
1753           computeKnownBitsAddSub(true, II->getArgOperand(0),
1754                                  II->getArgOperand(1), false, DemandedElts,
1755                                  Known, Known2, Depth, Q);
1756           break;
1757         case Intrinsic::usub_with_overflow:
1758         case Intrinsic::ssub_with_overflow:
1759           computeKnownBitsAddSub(false, II->getArgOperand(0),
1760                                  II->getArgOperand(1), false, DemandedElts,
1761                                  Known, Known2, Depth, Q);
1762           break;
1763         case Intrinsic::umul_with_overflow:
1764         case Intrinsic::smul_with_overflow:
1765           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1766                               DemandedElts, Known, Known2, Depth, Q);
1767           break;
1768         }
1769       }
1770     }
1771     break;
1772   case Instruction::Freeze:
1773     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1774                                   Depth + 1))
1775       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1776     break;
1777   }
1778 }
1779 
1780 /// Determine which bits of V are known to be either zero or one and return
1781 /// them.
1782 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1783                            unsigned Depth, const Query &Q) {
1784   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1785   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1786   return Known;
1787 }
1788 
1789 /// Determine which bits of V are known to be either zero or one and return
1790 /// them.
1791 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1792   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1793   computeKnownBits(V, Known, Depth, Q);
1794   return Known;
1795 }
1796 
1797 /// Determine which bits of V are known to be either zero or one and return
1798 /// them in the Known bit set.
1799 ///
1800 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1801 /// we cannot optimize based on the assumption that it is zero without changing
1802 /// it to be an explicit zero.  If we don't change it to zero, other code could
1803 /// optimized based on the contradictory assumption that it is non-zero.
1804 /// Because instcombine aggressively folds operations with undef args anyway,
1805 /// this won't lose us code quality.
1806 ///
1807 /// This function is defined on values with integer type, values with pointer
1808 /// type, and vectors of integers.  In the case
1809 /// where V is a vector, known zero, and known one values are the
1810 /// same width as the vector element, and the bit is set only if it is true
1811 /// for all of the demanded elements in the vector specified by DemandedElts.
1812 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1813                       KnownBits &Known, unsigned Depth, const Query &Q) {
1814   if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1815     // No demanded elts or V is a scalable vector, better to assume we don't
1816     // know anything.
1817     Known.resetAll();
1818     return;
1819   }
1820 
1821   assert(V && "No Value?");
1822   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1823 
1824 #ifndef NDEBUG
1825   Type *Ty = V->getType();
1826   unsigned BitWidth = Known.getBitWidth();
1827 
1828   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1829          "Not integer or pointer type!");
1830 
1831   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1832     assert(
1833         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1834         "DemandedElt width should equal the fixed vector number of elements");
1835   } else {
1836     assert(DemandedElts == APInt(1, 1) &&
1837            "DemandedElt width should be 1 for scalars");
1838   }
1839 
1840   Type *ScalarTy = Ty->getScalarType();
1841   if (ScalarTy->isPointerTy()) {
1842     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1843            "V and Known should have same BitWidth");
1844   } else {
1845     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1846            "V and Known should have same BitWidth");
1847   }
1848 #endif
1849 
1850   const APInt *C;
1851   if (match(V, m_APInt(C))) {
1852     // We know all of the bits for a scalar constant or a splat vector constant!
1853     Known = KnownBits::makeConstant(*C);
1854     return;
1855   }
1856   // Null and aggregate-zero are all-zeros.
1857   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1858     Known.setAllZero();
1859     return;
1860   }
1861   // Handle a constant vector by taking the intersection of the known bits of
1862   // each element.
1863   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1864     // We know that CDV must be a vector of integers. Take the intersection of
1865     // each element.
1866     Known.Zero.setAllBits(); Known.One.setAllBits();
1867     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1868       if (!DemandedElts[i])
1869         continue;
1870       APInt Elt = CDV->getElementAsAPInt(i);
1871       Known.Zero &= ~Elt;
1872       Known.One &= Elt;
1873     }
1874     return;
1875   }
1876 
1877   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1878     // We know that CV must be a vector of integers. Take the intersection of
1879     // each element.
1880     Known.Zero.setAllBits(); Known.One.setAllBits();
1881     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1882       if (!DemandedElts[i])
1883         continue;
1884       Constant *Element = CV->getAggregateElement(i);
1885       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1886       if (!ElementCI) {
1887         Known.resetAll();
1888         return;
1889       }
1890       const APInt &Elt = ElementCI->getValue();
1891       Known.Zero &= ~Elt;
1892       Known.One &= Elt;
1893     }
1894     return;
1895   }
1896 
1897   // Start out not knowing anything.
1898   Known.resetAll();
1899 
1900   // We can't imply anything about undefs.
1901   if (isa<UndefValue>(V))
1902     return;
1903 
1904   // There's no point in looking through other users of ConstantData for
1905   // assumptions.  Confirm that we've handled them all.
1906   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1907 
1908   // All recursive calls that increase depth must come after this.
1909   if (Depth == MaxAnalysisRecursionDepth)
1910     return;
1911 
1912   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1913   // the bits of its aliasee.
1914   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1915     if (!GA->isInterposable())
1916       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1917     return;
1918   }
1919 
1920   if (const Operator *I = dyn_cast<Operator>(V))
1921     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1922 
1923   // Aligned pointers have trailing zeros - refine Known.Zero set
1924   if (isa<PointerType>(V->getType())) {
1925     Align Alignment = V->getPointerAlignment(Q.DL);
1926     Known.Zero.setLowBits(Log2(Alignment));
1927   }
1928 
1929   // computeKnownBitsFromAssume strictly refines Known.
1930   // Therefore, we run them after computeKnownBitsFromOperator.
1931 
1932   // Check whether a nearby assume intrinsic can determine some known bits.
1933   computeKnownBitsFromAssume(V, Known, Depth, Q);
1934 
1935   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1936 }
1937 
1938 /// Return true if the given value is known to have exactly one
1939 /// bit set when defined. For vectors return true if every element is known to
1940 /// be a power of two when defined. Supports values with integer or pointer
1941 /// types and vectors of integers.
1942 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1943                             const Query &Q) {
1944   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1945 
1946   // Attempt to match against constants.
1947   if (OrZero && match(V, m_Power2OrZero()))
1948       return true;
1949   if (match(V, m_Power2()))
1950       return true;
1951 
1952   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1953   // it is shifted off the end then the result is undefined.
1954   if (match(V, m_Shl(m_One(), m_Value())))
1955     return true;
1956 
1957   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1958   // the bottom.  If it is shifted off the bottom then the result is undefined.
1959   if (match(V, m_LShr(m_SignMask(), m_Value())))
1960     return true;
1961 
1962   // The remaining tests are all recursive, so bail out if we hit the limit.
1963   if (Depth++ == MaxAnalysisRecursionDepth)
1964     return false;
1965 
1966   Value *X = nullptr, *Y = nullptr;
1967   // A shift left or a logical shift right of a power of two is a power of two
1968   // or zero.
1969   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1970                  match(V, m_LShr(m_Value(X), m_Value()))))
1971     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1972 
1973   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1974     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1975 
1976   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1977     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1978            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1979 
1980   // Peek through min/max.
1981   if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
1982     return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
1983            isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
1984   }
1985 
1986   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1987     // A power of two and'd with anything is a power of two or zero.
1988     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1989         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1990       return true;
1991     // X & (-X) is always a power of two or zero.
1992     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1993       return true;
1994     return false;
1995   }
1996 
1997   // Adding a power-of-two or zero to the same power-of-two or zero yields
1998   // either the original power-of-two, a larger power-of-two or zero.
1999   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2000     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2001     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2002         Q.IIQ.hasNoSignedWrap(VOBO)) {
2003       if (match(X, m_And(m_Specific(Y), m_Value())) ||
2004           match(X, m_And(m_Value(), m_Specific(Y))))
2005         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2006           return true;
2007       if (match(Y, m_And(m_Specific(X), m_Value())) ||
2008           match(Y, m_And(m_Value(), m_Specific(X))))
2009         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2010           return true;
2011 
2012       unsigned BitWidth = V->getType()->getScalarSizeInBits();
2013       KnownBits LHSBits(BitWidth);
2014       computeKnownBits(X, LHSBits, Depth, Q);
2015 
2016       KnownBits RHSBits(BitWidth);
2017       computeKnownBits(Y, RHSBits, Depth, Q);
2018       // If i8 V is a power of two or zero:
2019       //  ZeroBits: 1 1 1 0 1 1 1 1
2020       // ~ZeroBits: 0 0 0 1 0 0 0 0
2021       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2022         // If OrZero isn't set, we cannot give back a zero result.
2023         // Make sure either the LHS or RHS has a bit set.
2024         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2025           return true;
2026     }
2027   }
2028 
2029   // An exact divide or right shift can only shift off zero bits, so the result
2030   // is a power of two only if the first operand is a power of two and not
2031   // copying a sign bit (sdiv int_min, 2).
2032   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2033       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2034     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2035                                   Depth, Q);
2036   }
2037 
2038   return false;
2039 }
2040 
2041 /// Test whether a GEP's result is known to be non-null.
2042 ///
2043 /// Uses properties inherent in a GEP to try to determine whether it is known
2044 /// to be non-null.
2045 ///
2046 /// Currently this routine does not support vector GEPs.
2047 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2048                               const Query &Q) {
2049   const Function *F = nullptr;
2050   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2051     F = I->getFunction();
2052 
2053   if (!GEP->isInBounds() ||
2054       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2055     return false;
2056 
2057   // FIXME: Support vector-GEPs.
2058   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2059 
2060   // If the base pointer is non-null, we cannot walk to a null address with an
2061   // inbounds GEP in address space zero.
2062   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2063     return true;
2064 
2065   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2066   // If so, then the GEP cannot produce a null pointer, as doing so would
2067   // inherently violate the inbounds contract within address space zero.
2068   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2069        GTI != GTE; ++GTI) {
2070     // Struct types are easy -- they must always be indexed by a constant.
2071     if (StructType *STy = GTI.getStructTypeOrNull()) {
2072       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2073       unsigned ElementIdx = OpC->getZExtValue();
2074       const StructLayout *SL = Q.DL.getStructLayout(STy);
2075       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2076       if (ElementOffset > 0)
2077         return true;
2078       continue;
2079     }
2080 
2081     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2082     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2083       continue;
2084 
2085     // Fast path the constant operand case both for efficiency and so we don't
2086     // increment Depth when just zipping down an all-constant GEP.
2087     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2088       if (!OpC->isZero())
2089         return true;
2090       continue;
2091     }
2092 
2093     // We post-increment Depth here because while isKnownNonZero increments it
2094     // as well, when we pop back up that increment won't persist. We don't want
2095     // to recurse 10k times just because we have 10k GEP operands. We don't
2096     // bail completely out because we want to handle constant GEPs regardless
2097     // of depth.
2098     if (Depth++ >= MaxAnalysisRecursionDepth)
2099       continue;
2100 
2101     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2102       return true;
2103   }
2104 
2105   return false;
2106 }
2107 
2108 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2109                                                   const Instruction *CtxI,
2110                                                   const DominatorTree *DT) {
2111   if (isa<Constant>(V))
2112     return false;
2113 
2114   if (!CtxI || !DT)
2115     return false;
2116 
2117   unsigned NumUsesExplored = 0;
2118   for (auto *U : V->users()) {
2119     // Avoid massive lists
2120     if (NumUsesExplored >= DomConditionsMaxUses)
2121       break;
2122     NumUsesExplored++;
2123 
2124     // If the value is used as an argument to a call or invoke, then argument
2125     // attributes may provide an answer about null-ness.
2126     if (const auto *CB = dyn_cast<CallBase>(U))
2127       if (auto *CalledFunc = CB->getCalledFunction())
2128         for (const Argument &Arg : CalledFunc->args())
2129           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2130               Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2131               DT->dominates(CB, CtxI))
2132             return true;
2133 
2134     // If the value is used as a load/store, then the pointer must be non null.
2135     if (V == getLoadStorePointerOperand(U)) {
2136       const Instruction *I = cast<Instruction>(U);
2137       if (!NullPointerIsDefined(I->getFunction(),
2138                                 V->getType()->getPointerAddressSpace()) &&
2139           DT->dominates(I, CtxI))
2140         return true;
2141     }
2142 
2143     // Consider only compare instructions uniquely controlling a branch
2144     Value *RHS;
2145     CmpInst::Predicate Pred;
2146     if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2147       continue;
2148 
2149     bool NonNullIfTrue;
2150     if (cmpExcludesZero(Pred, RHS))
2151       NonNullIfTrue = true;
2152     else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2153       NonNullIfTrue = false;
2154     else
2155       continue;
2156 
2157     SmallVector<const User *, 4> WorkList;
2158     SmallPtrSet<const User *, 4> Visited;
2159     for (auto *CmpU : U->users()) {
2160       assert(WorkList.empty() && "Should be!");
2161       if (Visited.insert(CmpU).second)
2162         WorkList.push_back(CmpU);
2163 
2164       while (!WorkList.empty()) {
2165         auto *Curr = WorkList.pop_back_val();
2166 
2167         // If a user is an AND, add all its users to the work list. We only
2168         // propagate "pred != null" condition through AND because it is only
2169         // correct to assume that all conditions of AND are met in true branch.
2170         // TODO: Support similar logic of OR and EQ predicate?
2171         if (NonNullIfTrue)
2172           if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2173             for (auto *CurrU : Curr->users())
2174               if (Visited.insert(CurrU).second)
2175                 WorkList.push_back(CurrU);
2176             continue;
2177           }
2178 
2179         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2180           assert(BI->isConditional() && "uses a comparison!");
2181 
2182           BasicBlock *NonNullSuccessor =
2183               BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2184           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2185           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2186             return true;
2187         } else if (NonNullIfTrue && isGuard(Curr) &&
2188                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2189           return true;
2190         }
2191       }
2192     }
2193   }
2194 
2195   return false;
2196 }
2197 
2198 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2199 /// ensure that the value it's attached to is never Value?  'RangeType' is
2200 /// is the type of the value described by the range.
2201 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2202   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2203   assert(NumRanges >= 1);
2204   for (unsigned i = 0; i < NumRanges; ++i) {
2205     ConstantInt *Lower =
2206         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2207     ConstantInt *Upper =
2208         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2209     ConstantRange Range(Lower->getValue(), Upper->getValue());
2210     if (Range.contains(Value))
2211       return false;
2212   }
2213   return true;
2214 }
2215 
2216 static bool isNonZeroRecurrence(const PHINode *PN) {
2217   // Try and detect a recurrence that monotonically increases from a
2218   // starting value, as these are common as induction variables.
2219   BinaryOperator *BO = nullptr;
2220   Value *Start = nullptr, *Step = nullptr;
2221   const APInt *StartC, *StepC;
2222   if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2223       !match(Start, m_APInt(StartC)))
2224     return false;
2225 
2226   switch (BO->getOpcode()) {
2227   case Instruction::Add:
2228     return match(Step, m_APInt(StepC)) &&
2229            ((BO->hasNoUnsignedWrap() && !StartC->isNullValue() &&
2230              !StepC->isNullValue()) ||
2231             (BO->hasNoSignedWrap() && StartC->isStrictlyPositive() &&
2232              StepC->isNonNegative()));
2233   case Instruction::Mul:
2234     return !StartC->isNullValue() && match(Step, m_APInt(StepC)) &&
2235            ((BO->hasNoUnsignedWrap() && !StepC->isNullValue()) ||
2236             (BO->hasNoSignedWrap() && StepC->isStrictlyPositive()));
2237   case Instruction::Shl:
2238     return !StartC->isNullValue() &&
2239            (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap());
2240   case Instruction::AShr:
2241   case Instruction::LShr:
2242     return !StartC->isNullValue() && BO->isExact();
2243   default:
2244     return false;
2245   }
2246 }
2247 
2248 /// Return true if the given value is known to be non-zero when defined. For
2249 /// vectors, return true if every demanded element is known to be non-zero when
2250 /// defined. For pointers, if the context instruction and dominator tree are
2251 /// specified, perform context-sensitive analysis and return true if the
2252 /// pointer couldn't possibly be null at the specified instruction.
2253 /// Supports values with integer or pointer type and vectors of integers.
2254 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2255                     const Query &Q) {
2256   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2257   // vector
2258   if (isa<ScalableVectorType>(V->getType()))
2259     return false;
2260 
2261   if (auto *C = dyn_cast<Constant>(V)) {
2262     if (C->isNullValue())
2263       return false;
2264     if (isa<ConstantInt>(C))
2265       // Must be non-zero due to null test above.
2266       return true;
2267 
2268     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2269       // See the comment for IntToPtr/PtrToInt instructions below.
2270       if (CE->getOpcode() == Instruction::IntToPtr ||
2271           CE->getOpcode() == Instruction::PtrToInt)
2272         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2273                 .getFixedSize() <=
2274             Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2275           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2276     }
2277 
2278     // For constant vectors, check that all elements are undefined or known
2279     // non-zero to determine that the whole vector is known non-zero.
2280     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2281       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2282         if (!DemandedElts[i])
2283           continue;
2284         Constant *Elt = C->getAggregateElement(i);
2285         if (!Elt || Elt->isNullValue())
2286           return false;
2287         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2288           return false;
2289       }
2290       return true;
2291     }
2292 
2293     // A global variable in address space 0 is non null unless extern weak
2294     // or an absolute symbol reference. Other address spaces may have null as a
2295     // valid address for a global, so we can't assume anything.
2296     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2297       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2298           GV->getType()->getAddressSpace() == 0)
2299         return true;
2300     } else
2301       return false;
2302   }
2303 
2304   if (auto *I = dyn_cast<Instruction>(V)) {
2305     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2306       // If the possible ranges don't contain zero, then the value is
2307       // definitely non-zero.
2308       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2309         const APInt ZeroValue(Ty->getBitWidth(), 0);
2310         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2311           return true;
2312       }
2313     }
2314   }
2315 
2316   if (isKnownNonZeroFromAssume(V, Q))
2317     return true;
2318 
2319   // Some of the tests below are recursive, so bail out if we hit the limit.
2320   if (Depth++ >= MaxAnalysisRecursionDepth)
2321     return false;
2322 
2323   // Check for pointer simplifications.
2324 
2325   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2326     // Alloca never returns null, malloc might.
2327     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2328       return true;
2329 
2330     // A byval, inalloca may not be null in a non-default addres space. A
2331     // nonnull argument is assumed never 0.
2332     if (const Argument *A = dyn_cast<Argument>(V)) {
2333       if (((A->hasPassPointeeByValueCopyAttr() &&
2334             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2335            A->hasNonNullAttr()))
2336         return true;
2337     }
2338 
2339     // A Load tagged with nonnull metadata is never null.
2340     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2341       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2342         return true;
2343 
2344     if (const auto *Call = dyn_cast<CallBase>(V)) {
2345       if (Call->isReturnNonNull())
2346         return true;
2347       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2348         return isKnownNonZero(RP, Depth, Q);
2349     }
2350   }
2351 
2352   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2353     return true;
2354 
2355   // Check for recursive pointer simplifications.
2356   if (V->getType()->isPointerTy()) {
2357     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2358     // do not alter the value, or at least not the nullness property of the
2359     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2360     //
2361     // Note that we have to take special care to avoid looking through
2362     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2363     // as casts that can alter the value, e.g., AddrSpaceCasts.
2364     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2365       return isGEPKnownNonNull(GEP, Depth, Q);
2366 
2367     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2368       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2369 
2370     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2371       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2372           Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2373         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2374   }
2375 
2376   // Similar to int2ptr above, we can look through ptr2int here if the cast
2377   // is a no-op or an extend and not a truncate.
2378   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2379     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2380         Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2381       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2382 
2383   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2384 
2385   // X | Y != 0 if X != 0 or Y != 0.
2386   Value *X = nullptr, *Y = nullptr;
2387   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2388     return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2389            isKnownNonZero(Y, DemandedElts, Depth, Q);
2390 
2391   // ext X != 0 if X != 0.
2392   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2393     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2394 
2395   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2396   // if the lowest bit is shifted off the end.
2397   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2398     // shl nuw can't remove any non-zero bits.
2399     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2400     if (Q.IIQ.hasNoUnsignedWrap(BO))
2401       return isKnownNonZero(X, Depth, Q);
2402 
2403     KnownBits Known(BitWidth);
2404     computeKnownBits(X, DemandedElts, Known, Depth, Q);
2405     if (Known.One[0])
2406       return true;
2407   }
2408   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2409   // defined if the sign bit is shifted off the end.
2410   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2411     // shr exact can only shift out zero bits.
2412     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2413     if (BO->isExact())
2414       return isKnownNonZero(X, Depth, Q);
2415 
2416     KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2417     if (Known.isNegative())
2418       return true;
2419 
2420     // If the shifter operand is a constant, and all of the bits shifted
2421     // out are known to be zero, and X is known non-zero then at least one
2422     // non-zero bit must remain.
2423     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2424       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2425       // Is there a known one in the portion not shifted out?
2426       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2427         return true;
2428       // Are all the bits to be shifted out known zero?
2429       if (Known.countMinTrailingZeros() >= ShiftVal)
2430         return isKnownNonZero(X, DemandedElts, Depth, Q);
2431     }
2432   }
2433   // div exact can only produce a zero if the dividend is zero.
2434   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2435     return isKnownNonZero(X, DemandedElts, Depth, Q);
2436   }
2437   // X + Y.
2438   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2439     KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2440     KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2441 
2442     // If X and Y are both non-negative (as signed values) then their sum is not
2443     // zero unless both X and Y are zero.
2444     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2445       if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2446           isKnownNonZero(Y, DemandedElts, Depth, Q))
2447         return true;
2448 
2449     // If X and Y are both negative (as signed values) then their sum is not
2450     // zero unless both X and Y equal INT_MIN.
2451     if (XKnown.isNegative() && YKnown.isNegative()) {
2452       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2453       // The sign bit of X is set.  If some other bit is set then X is not equal
2454       // to INT_MIN.
2455       if (XKnown.One.intersects(Mask))
2456         return true;
2457       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2458       // to INT_MIN.
2459       if (YKnown.One.intersects(Mask))
2460         return true;
2461     }
2462 
2463     // The sum of a non-negative number and a power of two is not zero.
2464     if (XKnown.isNonNegative() &&
2465         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2466       return true;
2467     if (YKnown.isNonNegative() &&
2468         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2469       return true;
2470   }
2471   // X * Y.
2472   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2473     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2474     // If X and Y are non-zero then so is X * Y as long as the multiplication
2475     // does not overflow.
2476     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2477         isKnownNonZero(X, DemandedElts, Depth, Q) &&
2478         isKnownNonZero(Y, DemandedElts, Depth, Q))
2479       return true;
2480   }
2481   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2482   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2483     if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2484         isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2485       return true;
2486   }
2487   // PHI
2488   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2489     if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2490       return true;
2491 
2492     // Check if all incoming values are non-zero using recursion.
2493     Query RecQ = Q;
2494     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2495     return llvm::all_of(PN->operands(), [&](const Use &U) {
2496       if (U.get() == PN)
2497         return true;
2498       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2499       return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2500     });
2501   }
2502   // ExtractElement
2503   else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2504     const Value *Vec = EEI->getVectorOperand();
2505     const Value *Idx = EEI->getIndexOperand();
2506     auto *CIdx = dyn_cast<ConstantInt>(Idx);
2507     if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2508       unsigned NumElts = VecTy->getNumElements();
2509       APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2510       if (CIdx && CIdx->getValue().ult(NumElts))
2511         DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2512       return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2513     }
2514   }
2515   // Freeze
2516   else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2517     auto *Op = FI->getOperand(0);
2518     if (isKnownNonZero(Op, Depth, Q) &&
2519         isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2520       return true;
2521   }
2522 
2523   KnownBits Known(BitWidth);
2524   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2525   return Known.One != 0;
2526 }
2527 
2528 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2529   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2530   // vector
2531   if (isa<ScalableVectorType>(V->getType()))
2532     return false;
2533 
2534   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2535   APInt DemandedElts =
2536       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2537   return isKnownNonZero(V, DemandedElts, Depth, Q);
2538 }
2539 
2540 /// If the pair of operators are the same invertible function of a single
2541 /// operand return the index of that operand.  Otherwise, return None.  An
2542 /// invertible function is one that is 1-to-1 and maps every input value
2543 /// to exactly one output value.  This is equivalent to saying that O1
2544 /// and O2 are equal exactly when the specified pair of operands are equal,
2545 /// (except that O1 and O2 may be poison more often.)
2546 static Optional<unsigned> getInvertibleOperand(const Operator *O1,
2547                                                const Operator *O2) {
2548   if (O1->getOpcode() != O2->getOpcode())
2549     return None;
2550 
2551   switch (O1->getOpcode()) {
2552   default:
2553     break;
2554   case Instruction::Add:
2555   case Instruction::Sub:
2556     if (O1->getOperand(0) == O2->getOperand(0))
2557       return 1;
2558     if (O1->getOperand(1) == O2->getOperand(1))
2559       return 0;
2560     break;
2561   case Instruction::Mul: {
2562     // invertible if A * B == (A * B) mod 2^N where A, and B are integers
2563     // and N is the bitwdith.  The nsw case is non-obvious, but proven by
2564     // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2565     auto *OBO1 = cast<OverflowingBinaryOperator>(O1);
2566     auto *OBO2 = cast<OverflowingBinaryOperator>(O2);
2567     if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2568         (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2569       break;
2570 
2571     // Assume operand order has been canonicalized
2572     if (O1->getOperand(1) == O2->getOperand(1) &&
2573         isa<ConstantInt>(O1->getOperand(1)) &&
2574         !cast<ConstantInt>(O1->getOperand(1))->isZero())
2575       return 0;
2576     break;
2577   }
2578   case Instruction::Shl: {
2579     // Same as multiplies, with the difference that we don't need to check
2580     // for a non-zero multiply. Shifts always multiply by non-zero.
2581     auto *OBO1 = cast<OverflowingBinaryOperator>(O1);
2582     auto *OBO2 = cast<OverflowingBinaryOperator>(O2);
2583     if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2584         (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2585       break;
2586 
2587     if (O1->getOperand(1) == O2->getOperand(1))
2588       return 0;
2589     break;
2590   }
2591   case Instruction::SExt:
2592   case Instruction::ZExt:
2593     if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType())
2594       return 0;
2595     break;
2596   }
2597   return None;
2598 }
2599 
2600 /// Return true if V2 == V1 + X, where X is known non-zero.
2601 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2602                            const Query &Q) {
2603   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2604   if (!BO || BO->getOpcode() != Instruction::Add)
2605     return false;
2606   Value *Op = nullptr;
2607   if (V2 == BO->getOperand(0))
2608     Op = BO->getOperand(1);
2609   else if (V2 == BO->getOperand(1))
2610     Op = BO->getOperand(0);
2611   else
2612     return false;
2613   return isKnownNonZero(Op, Depth + 1, Q);
2614 }
2615 
2616 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2617 /// the multiplication is nuw or nsw.
2618 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2619                           const Query &Q) {
2620   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2621     const APInt *C;
2622     return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2623            (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2624            !C->isNullValue() && !C->isOneValue() &&
2625            isKnownNonZero(V1, Depth + 1, Q);
2626   }
2627   return false;
2628 }
2629 
2630 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2631 /// the shift is nuw or nsw.
2632 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2633                           const Query &Q) {
2634   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2635     const APInt *C;
2636     return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2637            (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2638            !C->isNullValue() && isKnownNonZero(V1, Depth + 1, Q);
2639   }
2640   return false;
2641 }
2642 
2643 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2644                            unsigned Depth, const Query &Q) {
2645   // Check two PHIs are in same block.
2646   if (PN1->getParent() != PN2->getParent())
2647     return false;
2648 
2649   SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2650   bool UsedFullRecursion = false;
2651   for (const BasicBlock *IncomBB : PN1->blocks()) {
2652     if (!VisitedBBs.insert(IncomBB).second)
2653       continue; // Don't reprocess blocks that we have dealt with already.
2654     const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2655     const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2656     const APInt *C1, *C2;
2657     if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2658       continue;
2659 
2660     // Only one pair of phi operands is allowed for full recursion.
2661     if (UsedFullRecursion)
2662       return false;
2663 
2664     Query RecQ = Q;
2665     RecQ.CxtI = IncomBB->getTerminator();
2666     if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2667       return false;
2668     UsedFullRecursion = true;
2669   }
2670   return true;
2671 }
2672 
2673 /// Return true if it is known that V1 != V2.
2674 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2675                             const Query &Q) {
2676   if (V1 == V2)
2677     return false;
2678   if (V1->getType() != V2->getType())
2679     // We can't look through casts yet.
2680     return false;
2681 
2682   if (Depth >= MaxAnalysisRecursionDepth)
2683     return false;
2684 
2685   // See if we can recurse through (exactly one of) our operands.  This
2686   // requires our operation be 1-to-1 and map every input value to exactly
2687   // one output value.  Such an operation is invertible.
2688   auto *O1 = dyn_cast<Operator>(V1);
2689   auto *O2 = dyn_cast<Operator>(V2);
2690   if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2691     if (Optional<unsigned> Opt = getInvertibleOperand(O1, O2)) {
2692       unsigned Idx = *Opt;
2693       return isKnownNonEqual(O1->getOperand(Idx), O2->getOperand(Idx),
2694                              Depth + 1, Q);
2695     }
2696     if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2697       const PHINode *PN2 = cast<PHINode>(V2);
2698       // FIXME: This is missing a generalization to handle the case where one is
2699       // a PHI and another one isn't.
2700       if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2701         return true;
2702     };
2703   }
2704 
2705   if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2706     return true;
2707 
2708   if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2709     return true;
2710 
2711   if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2712     return true;
2713 
2714   if (V1->getType()->isIntOrIntVectorTy()) {
2715     // Are any known bits in V1 contradictory to known bits in V2? If V1
2716     // has a known zero where V2 has a known one, they must not be equal.
2717     KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2718     KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2719 
2720     if (Known1.Zero.intersects(Known2.One) ||
2721         Known2.Zero.intersects(Known1.One))
2722       return true;
2723   }
2724   return false;
2725 }
2726 
2727 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2728 /// simplify operations downstream. Mask is known to be zero for bits that V
2729 /// cannot have.
2730 ///
2731 /// This function is defined on values with integer type, values with pointer
2732 /// type, and vectors of integers.  In the case
2733 /// where V is a vector, the mask, known zero, and known one values are the
2734 /// same width as the vector element, and the bit is set only if it is true
2735 /// for all of the elements in the vector.
2736 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2737                        const Query &Q) {
2738   KnownBits Known(Mask.getBitWidth());
2739   computeKnownBits(V, Known, Depth, Q);
2740   return Mask.isSubsetOf(Known.Zero);
2741 }
2742 
2743 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2744 // Returns the input and lower/upper bounds.
2745 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2746                                 const APInt *&CLow, const APInt *&CHigh) {
2747   assert(isa<Operator>(Select) &&
2748          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2749          "Input should be a Select!");
2750 
2751   const Value *LHS = nullptr, *RHS = nullptr;
2752   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2753   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2754     return false;
2755 
2756   if (!match(RHS, m_APInt(CLow)))
2757     return false;
2758 
2759   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2760   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2761   if (getInverseMinMaxFlavor(SPF) != SPF2)
2762     return false;
2763 
2764   if (!match(RHS2, m_APInt(CHigh)))
2765     return false;
2766 
2767   if (SPF == SPF_SMIN)
2768     std::swap(CLow, CHigh);
2769 
2770   In = LHS2;
2771   return CLow->sle(*CHigh);
2772 }
2773 
2774 /// For vector constants, loop over the elements and find the constant with the
2775 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2776 /// or if any element was not analyzed; otherwise, return the count for the
2777 /// element with the minimum number of sign bits.
2778 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2779                                                  const APInt &DemandedElts,
2780                                                  unsigned TyBits) {
2781   const auto *CV = dyn_cast<Constant>(V);
2782   if (!CV || !isa<FixedVectorType>(CV->getType()))
2783     return 0;
2784 
2785   unsigned MinSignBits = TyBits;
2786   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2787   for (unsigned i = 0; i != NumElts; ++i) {
2788     if (!DemandedElts[i])
2789       continue;
2790     // If we find a non-ConstantInt, bail out.
2791     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2792     if (!Elt)
2793       return 0;
2794 
2795     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2796   }
2797 
2798   return MinSignBits;
2799 }
2800 
2801 static unsigned ComputeNumSignBitsImpl(const Value *V,
2802                                        const APInt &DemandedElts,
2803                                        unsigned Depth, const Query &Q);
2804 
2805 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2806                                    unsigned Depth, const Query &Q) {
2807   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2808   assert(Result > 0 && "At least one sign bit needs to be present!");
2809   return Result;
2810 }
2811 
2812 /// Return the number of times the sign bit of the register is replicated into
2813 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2814 /// (itself), but other cases can give us information. For example, immediately
2815 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2816 /// other, so we return 3. For vectors, return the number of sign bits for the
2817 /// vector element with the minimum number of known sign bits of the demanded
2818 /// elements in the vector specified by DemandedElts.
2819 static unsigned ComputeNumSignBitsImpl(const Value *V,
2820                                        const APInt &DemandedElts,
2821                                        unsigned Depth, const Query &Q) {
2822   Type *Ty = V->getType();
2823 
2824   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2825   // vector
2826   if (isa<ScalableVectorType>(Ty))
2827     return 1;
2828 
2829 #ifndef NDEBUG
2830   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2831 
2832   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2833     assert(
2834         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2835         "DemandedElt width should equal the fixed vector number of elements");
2836   } else {
2837     assert(DemandedElts == APInt(1, 1) &&
2838            "DemandedElt width should be 1 for scalars");
2839   }
2840 #endif
2841 
2842   // We return the minimum number of sign bits that are guaranteed to be present
2843   // in V, so for undef we have to conservatively return 1.  We don't have the
2844   // same behavior for poison though -- that's a FIXME today.
2845 
2846   Type *ScalarTy = Ty->getScalarType();
2847   unsigned TyBits = ScalarTy->isPointerTy() ?
2848     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2849     Q.DL.getTypeSizeInBits(ScalarTy);
2850 
2851   unsigned Tmp, Tmp2;
2852   unsigned FirstAnswer = 1;
2853 
2854   // Note that ConstantInt is handled by the general computeKnownBits case
2855   // below.
2856 
2857   if (Depth == MaxAnalysisRecursionDepth)
2858     return 1;
2859 
2860   if (auto *U = dyn_cast<Operator>(V)) {
2861     switch (Operator::getOpcode(V)) {
2862     default: break;
2863     case Instruction::SExt:
2864       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2865       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2866 
2867     case Instruction::SDiv: {
2868       const APInt *Denominator;
2869       // sdiv X, C -> adds log(C) sign bits.
2870       if (match(U->getOperand(1), m_APInt(Denominator))) {
2871 
2872         // Ignore non-positive denominator.
2873         if (!Denominator->isStrictlyPositive())
2874           break;
2875 
2876         // Calculate the incoming numerator bits.
2877         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2878 
2879         // Add floor(log(C)) bits to the numerator bits.
2880         return std::min(TyBits, NumBits + Denominator->logBase2());
2881       }
2882       break;
2883     }
2884 
2885     case Instruction::SRem: {
2886       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2887 
2888       const APInt *Denominator;
2889       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2890       // positive constant.  This let us put a lower bound on the number of sign
2891       // bits.
2892       if (match(U->getOperand(1), m_APInt(Denominator))) {
2893 
2894         // Ignore non-positive denominator.
2895         if (Denominator->isStrictlyPositive()) {
2896           // Calculate the leading sign bit constraints by examining the
2897           // denominator.  Given that the denominator is positive, there are two
2898           // cases:
2899           //
2900           //  1. The numerator is positive. The result range is [0,C) and
2901           //     [0,C) u< (1 << ceilLogBase2(C)).
2902           //
2903           //  2. The numerator is negative. Then the result range is (-C,0] and
2904           //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2905           //
2906           // Thus a lower bound on the number of sign bits is `TyBits -
2907           // ceilLogBase2(C)`.
2908 
2909           unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2910           Tmp = std::max(Tmp, ResBits);
2911         }
2912       }
2913       return Tmp;
2914     }
2915 
2916     case Instruction::AShr: {
2917       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2918       // ashr X, C   -> adds C sign bits.  Vectors too.
2919       const APInt *ShAmt;
2920       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2921         if (ShAmt->uge(TyBits))
2922           break; // Bad shift.
2923         unsigned ShAmtLimited = ShAmt->getZExtValue();
2924         Tmp += ShAmtLimited;
2925         if (Tmp > TyBits) Tmp = TyBits;
2926       }
2927       return Tmp;
2928     }
2929     case Instruction::Shl: {
2930       const APInt *ShAmt;
2931       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2932         // shl destroys sign bits.
2933         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2934         if (ShAmt->uge(TyBits) ||   // Bad shift.
2935             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2936         Tmp2 = ShAmt->getZExtValue();
2937         return Tmp - Tmp2;
2938       }
2939       break;
2940     }
2941     case Instruction::And:
2942     case Instruction::Or:
2943     case Instruction::Xor: // NOT is handled here.
2944       // Logical binary ops preserve the number of sign bits at the worst.
2945       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2946       if (Tmp != 1) {
2947         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2948         FirstAnswer = std::min(Tmp, Tmp2);
2949         // We computed what we know about the sign bits as our first
2950         // answer. Now proceed to the generic code that uses
2951         // computeKnownBits, and pick whichever answer is better.
2952       }
2953       break;
2954 
2955     case Instruction::Select: {
2956       // If we have a clamp pattern, we know that the number of sign bits will
2957       // be the minimum of the clamp min/max range.
2958       const Value *X;
2959       const APInt *CLow, *CHigh;
2960       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2961         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2962 
2963       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2964       if (Tmp == 1) break;
2965       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2966       return std::min(Tmp, Tmp2);
2967     }
2968 
2969     case Instruction::Add:
2970       // Add can have at most one carry bit.  Thus we know that the output
2971       // is, at worst, one more bit than the inputs.
2972       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2973       if (Tmp == 1) break;
2974 
2975       // Special case decrementing a value (ADD X, -1):
2976       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2977         if (CRHS->isAllOnesValue()) {
2978           KnownBits Known(TyBits);
2979           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2980 
2981           // If the input is known to be 0 or 1, the output is 0/-1, which is
2982           // all sign bits set.
2983           if ((Known.Zero | 1).isAllOnesValue())
2984             return TyBits;
2985 
2986           // If we are subtracting one from a positive number, there is no carry
2987           // out of the result.
2988           if (Known.isNonNegative())
2989             return Tmp;
2990         }
2991 
2992       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2993       if (Tmp2 == 1) break;
2994       return std::min(Tmp, Tmp2) - 1;
2995 
2996     case Instruction::Sub:
2997       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2998       if (Tmp2 == 1) break;
2999 
3000       // Handle NEG.
3001       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3002         if (CLHS->isNullValue()) {
3003           KnownBits Known(TyBits);
3004           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3005           // If the input is known to be 0 or 1, the output is 0/-1, which is
3006           // all sign bits set.
3007           if ((Known.Zero | 1).isAllOnesValue())
3008             return TyBits;
3009 
3010           // If the input is known to be positive (the sign bit is known clear),
3011           // the output of the NEG has the same number of sign bits as the
3012           // input.
3013           if (Known.isNonNegative())
3014             return Tmp2;
3015 
3016           // Otherwise, we treat this like a SUB.
3017         }
3018 
3019       // Sub can have at most one carry bit.  Thus we know that the output
3020       // is, at worst, one more bit than the inputs.
3021       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3022       if (Tmp == 1) break;
3023       return std::min(Tmp, Tmp2) - 1;
3024 
3025     case Instruction::Mul: {
3026       // The output of the Mul can be at most twice the valid bits in the
3027       // inputs.
3028       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3029       if (SignBitsOp0 == 1) break;
3030       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3031       if (SignBitsOp1 == 1) break;
3032       unsigned OutValidBits =
3033           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3034       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3035     }
3036 
3037     case Instruction::PHI: {
3038       const PHINode *PN = cast<PHINode>(U);
3039       unsigned NumIncomingValues = PN->getNumIncomingValues();
3040       // Don't analyze large in-degree PHIs.
3041       if (NumIncomingValues > 4) break;
3042       // Unreachable blocks may have zero-operand PHI nodes.
3043       if (NumIncomingValues == 0) break;
3044 
3045       // Take the minimum of all incoming values.  This can't infinitely loop
3046       // because of our depth threshold.
3047       Query RecQ = Q;
3048       Tmp = TyBits;
3049       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3050         if (Tmp == 1) return Tmp;
3051         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3052         Tmp = std::min(
3053             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3054       }
3055       return Tmp;
3056     }
3057 
3058     case Instruction::Trunc:
3059       // FIXME: it's tricky to do anything useful for this, but it is an
3060       // important case for targets like X86.
3061       break;
3062 
3063     case Instruction::ExtractElement:
3064       // Look through extract element. At the moment we keep this simple and
3065       // skip tracking the specific element. But at least we might find
3066       // information valid for all elements of the vector (for example if vector
3067       // is sign extended, shifted, etc).
3068       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3069 
3070     case Instruction::ShuffleVector: {
3071       // Collect the minimum number of sign bits that are shared by every vector
3072       // element referenced by the shuffle.
3073       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3074       if (!Shuf) {
3075         // FIXME: Add support for shufflevector constant expressions.
3076         return 1;
3077       }
3078       APInt DemandedLHS, DemandedRHS;
3079       // For undef elements, we don't know anything about the common state of
3080       // the shuffle result.
3081       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3082         return 1;
3083       Tmp = std::numeric_limits<unsigned>::max();
3084       if (!!DemandedLHS) {
3085         const Value *LHS = Shuf->getOperand(0);
3086         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3087       }
3088       // If we don't know anything, early out and try computeKnownBits
3089       // fall-back.
3090       if (Tmp == 1)
3091         break;
3092       if (!!DemandedRHS) {
3093         const Value *RHS = Shuf->getOperand(1);
3094         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3095         Tmp = std::min(Tmp, Tmp2);
3096       }
3097       // If we don't know anything, early out and try computeKnownBits
3098       // fall-back.
3099       if (Tmp == 1)
3100         break;
3101       assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3102       return Tmp;
3103     }
3104     case Instruction::Call: {
3105       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3106         switch (II->getIntrinsicID()) {
3107         default: break;
3108         case Intrinsic::abs:
3109           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3110           if (Tmp == 1) break;
3111 
3112           // Absolute value reduces number of sign bits by at most 1.
3113           return Tmp - 1;
3114         }
3115       }
3116     }
3117     }
3118   }
3119 
3120   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3121   // use this information.
3122 
3123   // If we can examine all elements of a vector constant successfully, we're
3124   // done (we can't do any better than that). If not, keep trying.
3125   if (unsigned VecSignBits =
3126           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3127     return VecSignBits;
3128 
3129   KnownBits Known(TyBits);
3130   computeKnownBits(V, DemandedElts, Known, Depth, Q);
3131 
3132   // If we know that the sign bit is either zero or one, determine the number of
3133   // identical bits in the top of the input value.
3134   return std::max(FirstAnswer, Known.countMinSignBits());
3135 }
3136 
3137 /// This function computes the integer multiple of Base that equals V.
3138 /// If successful, it returns true and returns the multiple in
3139 /// Multiple. If unsuccessful, it returns false. It looks
3140 /// through SExt instructions only if LookThroughSExt is true.
3141 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3142                            bool LookThroughSExt, unsigned Depth) {
3143   assert(V && "No Value?");
3144   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3145   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
3146 
3147   Type *T = V->getType();
3148 
3149   ConstantInt *CI = dyn_cast<ConstantInt>(V);
3150 
3151   if (Base == 0)
3152     return false;
3153 
3154   if (Base == 1) {
3155     Multiple = V;
3156     return true;
3157   }
3158 
3159   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3160   Constant *BaseVal = ConstantInt::get(T, Base);
3161   if (CO && CO == BaseVal) {
3162     // Multiple is 1.
3163     Multiple = ConstantInt::get(T, 1);
3164     return true;
3165   }
3166 
3167   if (CI && CI->getZExtValue() % Base == 0) {
3168     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3169     return true;
3170   }
3171 
3172   if (Depth == MaxAnalysisRecursionDepth) return false;
3173 
3174   Operator *I = dyn_cast<Operator>(V);
3175   if (!I) return false;
3176 
3177   switch (I->getOpcode()) {
3178   default: break;
3179   case Instruction::SExt:
3180     if (!LookThroughSExt) return false;
3181     // otherwise fall through to ZExt
3182     LLVM_FALLTHROUGH;
3183   case Instruction::ZExt:
3184     return ComputeMultiple(I->getOperand(0), Base, Multiple,
3185                            LookThroughSExt, Depth+1);
3186   case Instruction::Shl:
3187   case Instruction::Mul: {
3188     Value *Op0 = I->getOperand(0);
3189     Value *Op1 = I->getOperand(1);
3190 
3191     if (I->getOpcode() == Instruction::Shl) {
3192       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3193       if (!Op1CI) return false;
3194       // Turn Op0 << Op1 into Op0 * 2^Op1
3195       APInt Op1Int = Op1CI->getValue();
3196       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3197       APInt API(Op1Int.getBitWidth(), 0);
3198       API.setBit(BitToSet);
3199       Op1 = ConstantInt::get(V->getContext(), API);
3200     }
3201 
3202     Value *Mul0 = nullptr;
3203     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3204       if (Constant *Op1C = dyn_cast<Constant>(Op1))
3205         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3206           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3207               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3208             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3209           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3210               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3211             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3212 
3213           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3214           Multiple = ConstantExpr::getMul(MulC, Op1C);
3215           return true;
3216         }
3217 
3218       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3219         if (Mul0CI->getValue() == 1) {
3220           // V == Base * Op1, so return Op1
3221           Multiple = Op1;
3222           return true;
3223         }
3224     }
3225 
3226     Value *Mul1 = nullptr;
3227     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3228       if (Constant *Op0C = dyn_cast<Constant>(Op0))
3229         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3230           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3231               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3232             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3233           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3234               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3235             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3236 
3237           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3238           Multiple = ConstantExpr::getMul(MulC, Op0C);
3239           return true;
3240         }
3241 
3242       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3243         if (Mul1CI->getValue() == 1) {
3244           // V == Base * Op0, so return Op0
3245           Multiple = Op0;
3246           return true;
3247         }
3248     }
3249   }
3250   }
3251 
3252   // We could not determine if V is a multiple of Base.
3253   return false;
3254 }
3255 
3256 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3257                                             const TargetLibraryInfo *TLI) {
3258   const Function *F = CB.getCalledFunction();
3259   if (!F)
3260     return Intrinsic::not_intrinsic;
3261 
3262   if (F->isIntrinsic())
3263     return F->getIntrinsicID();
3264 
3265   // We are going to infer semantics of a library function based on mapping it
3266   // to an LLVM intrinsic. Check that the library function is available from
3267   // this callbase and in this environment.
3268   LibFunc Func;
3269   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3270       !CB.onlyReadsMemory())
3271     return Intrinsic::not_intrinsic;
3272 
3273   switch (Func) {
3274   default:
3275     break;
3276   case LibFunc_sin:
3277   case LibFunc_sinf:
3278   case LibFunc_sinl:
3279     return Intrinsic::sin;
3280   case LibFunc_cos:
3281   case LibFunc_cosf:
3282   case LibFunc_cosl:
3283     return Intrinsic::cos;
3284   case LibFunc_exp:
3285   case LibFunc_expf:
3286   case LibFunc_expl:
3287     return Intrinsic::exp;
3288   case LibFunc_exp2:
3289   case LibFunc_exp2f:
3290   case LibFunc_exp2l:
3291     return Intrinsic::exp2;
3292   case LibFunc_log:
3293   case LibFunc_logf:
3294   case LibFunc_logl:
3295     return Intrinsic::log;
3296   case LibFunc_log10:
3297   case LibFunc_log10f:
3298   case LibFunc_log10l:
3299     return Intrinsic::log10;
3300   case LibFunc_log2:
3301   case LibFunc_log2f:
3302   case LibFunc_log2l:
3303     return Intrinsic::log2;
3304   case LibFunc_fabs:
3305   case LibFunc_fabsf:
3306   case LibFunc_fabsl:
3307     return Intrinsic::fabs;
3308   case LibFunc_fmin:
3309   case LibFunc_fminf:
3310   case LibFunc_fminl:
3311     return Intrinsic::minnum;
3312   case LibFunc_fmax:
3313   case LibFunc_fmaxf:
3314   case LibFunc_fmaxl:
3315     return Intrinsic::maxnum;
3316   case LibFunc_copysign:
3317   case LibFunc_copysignf:
3318   case LibFunc_copysignl:
3319     return Intrinsic::copysign;
3320   case LibFunc_floor:
3321   case LibFunc_floorf:
3322   case LibFunc_floorl:
3323     return Intrinsic::floor;
3324   case LibFunc_ceil:
3325   case LibFunc_ceilf:
3326   case LibFunc_ceill:
3327     return Intrinsic::ceil;
3328   case LibFunc_trunc:
3329   case LibFunc_truncf:
3330   case LibFunc_truncl:
3331     return Intrinsic::trunc;
3332   case LibFunc_rint:
3333   case LibFunc_rintf:
3334   case LibFunc_rintl:
3335     return Intrinsic::rint;
3336   case LibFunc_nearbyint:
3337   case LibFunc_nearbyintf:
3338   case LibFunc_nearbyintl:
3339     return Intrinsic::nearbyint;
3340   case LibFunc_round:
3341   case LibFunc_roundf:
3342   case LibFunc_roundl:
3343     return Intrinsic::round;
3344   case LibFunc_roundeven:
3345   case LibFunc_roundevenf:
3346   case LibFunc_roundevenl:
3347     return Intrinsic::roundeven;
3348   case LibFunc_pow:
3349   case LibFunc_powf:
3350   case LibFunc_powl:
3351     return Intrinsic::pow;
3352   case LibFunc_sqrt:
3353   case LibFunc_sqrtf:
3354   case LibFunc_sqrtl:
3355     return Intrinsic::sqrt;
3356   }
3357 
3358   return Intrinsic::not_intrinsic;
3359 }
3360 
3361 /// Return true if we can prove that the specified FP value is never equal to
3362 /// -0.0.
3363 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3364 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3365 ///       the same as +0.0 in floating-point ops.
3366 ///
3367 /// NOTE: this function will need to be revisited when we support non-default
3368 /// rounding modes!
3369 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3370                                 unsigned Depth) {
3371   if (auto *CFP = dyn_cast<ConstantFP>(V))
3372     return !CFP->getValueAPF().isNegZero();
3373 
3374   if (Depth == MaxAnalysisRecursionDepth)
3375     return false;
3376 
3377   auto *Op = dyn_cast<Operator>(V);
3378   if (!Op)
3379     return false;
3380 
3381   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3382   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3383     return true;
3384 
3385   // sitofp and uitofp turn into +0.0 for zero.
3386   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3387     return true;
3388 
3389   if (auto *Call = dyn_cast<CallInst>(Op)) {
3390     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3391     switch (IID) {
3392     default:
3393       break;
3394     // sqrt(-0.0) = -0.0, no other negative results are possible.
3395     case Intrinsic::sqrt:
3396     case Intrinsic::canonicalize:
3397       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3398     // fabs(x) != -0.0
3399     case Intrinsic::fabs:
3400       return true;
3401     }
3402   }
3403 
3404   return false;
3405 }
3406 
3407 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3408 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3409 /// bit despite comparing equal.
3410 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3411                                             const TargetLibraryInfo *TLI,
3412                                             bool SignBitOnly,
3413                                             unsigned Depth) {
3414   // TODO: This function does not do the right thing when SignBitOnly is true
3415   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3416   // which flips the sign bits of NaNs.  See
3417   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3418 
3419   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3420     return !CFP->getValueAPF().isNegative() ||
3421            (!SignBitOnly && CFP->getValueAPF().isZero());
3422   }
3423 
3424   // Handle vector of constants.
3425   if (auto *CV = dyn_cast<Constant>(V)) {
3426     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3427       unsigned NumElts = CVFVTy->getNumElements();
3428       for (unsigned i = 0; i != NumElts; ++i) {
3429         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3430         if (!CFP)
3431           return false;
3432         if (CFP->getValueAPF().isNegative() &&
3433             (SignBitOnly || !CFP->getValueAPF().isZero()))
3434           return false;
3435       }
3436 
3437       // All non-negative ConstantFPs.
3438       return true;
3439     }
3440   }
3441 
3442   if (Depth == MaxAnalysisRecursionDepth)
3443     return false;
3444 
3445   const Operator *I = dyn_cast<Operator>(V);
3446   if (!I)
3447     return false;
3448 
3449   switch (I->getOpcode()) {
3450   default:
3451     break;
3452   // Unsigned integers are always nonnegative.
3453   case Instruction::UIToFP:
3454     return true;
3455   case Instruction::FMul:
3456   case Instruction::FDiv:
3457     // X * X is always non-negative or a NaN.
3458     // X / X is always exactly 1.0 or a NaN.
3459     if (I->getOperand(0) == I->getOperand(1) &&
3460         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3461       return true;
3462 
3463     LLVM_FALLTHROUGH;
3464   case Instruction::FAdd:
3465   case Instruction::FRem:
3466     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3467                                            Depth + 1) &&
3468            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3469                                            Depth + 1);
3470   case Instruction::Select:
3471     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3472                                            Depth + 1) &&
3473            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3474                                            Depth + 1);
3475   case Instruction::FPExt:
3476   case Instruction::FPTrunc:
3477     // Widening/narrowing never change sign.
3478     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3479                                            Depth + 1);
3480   case Instruction::ExtractElement:
3481     // Look through extract element. At the moment we keep this simple and skip
3482     // tracking the specific element. But at least we might find information
3483     // valid for all elements of the vector.
3484     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3485                                            Depth + 1);
3486   case Instruction::Call:
3487     const auto *CI = cast<CallInst>(I);
3488     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3489     switch (IID) {
3490     default:
3491       break;
3492     case Intrinsic::maxnum: {
3493       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3494       auto isPositiveNum = [&](Value *V) {
3495         if (SignBitOnly) {
3496           // With SignBitOnly, this is tricky because the result of
3497           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3498           // a constant strictly greater than 0.0.
3499           const APFloat *C;
3500           return match(V, m_APFloat(C)) &&
3501                  *C > APFloat::getZero(C->getSemantics());
3502         }
3503 
3504         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3505         // maxnum can't be ordered-less-than-zero.
3506         return isKnownNeverNaN(V, TLI) &&
3507                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3508       };
3509 
3510       // TODO: This could be improved. We could also check that neither operand
3511       //       has its sign bit set (and at least 1 is not-NAN?).
3512       return isPositiveNum(V0) || isPositiveNum(V1);
3513     }
3514 
3515     case Intrinsic::maximum:
3516       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3517                                              Depth + 1) ||
3518              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3519                                              Depth + 1);
3520     case Intrinsic::minnum:
3521     case Intrinsic::minimum:
3522       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3523                                              Depth + 1) &&
3524              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3525                                              Depth + 1);
3526     case Intrinsic::exp:
3527     case Intrinsic::exp2:
3528     case Intrinsic::fabs:
3529       return true;
3530 
3531     case Intrinsic::sqrt:
3532       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3533       if (!SignBitOnly)
3534         return true;
3535       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3536                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3537 
3538     case Intrinsic::powi:
3539       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3540         // powi(x,n) is non-negative if n is even.
3541         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3542           return true;
3543       }
3544       // TODO: This is not correct.  Given that exp is an integer, here are the
3545       // ways that pow can return a negative value:
3546       //
3547       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3548       //   pow(-0, exp)   --> -inf if exp is negative odd.
3549       //   pow(-0, exp)   --> -0 if exp is positive odd.
3550       //   pow(-inf, exp) --> -0 if exp is negative odd.
3551       //   pow(-inf, exp) --> -inf if exp is positive odd.
3552       //
3553       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3554       // but we must return false if x == -0.  Unfortunately we do not currently
3555       // have a way of expressing this constraint.  See details in
3556       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3557       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3558                                              Depth + 1);
3559 
3560     case Intrinsic::fma:
3561     case Intrinsic::fmuladd:
3562       // x*x+y is non-negative if y is non-negative.
3563       return I->getOperand(0) == I->getOperand(1) &&
3564              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3565              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3566                                              Depth + 1);
3567     }
3568     break;
3569   }
3570   return false;
3571 }
3572 
3573 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3574                                        const TargetLibraryInfo *TLI) {
3575   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3576 }
3577 
3578 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3579   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3580 }
3581 
3582 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3583                                 unsigned Depth) {
3584   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3585 
3586   // If we're told that infinities won't happen, assume they won't.
3587   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3588     if (FPMathOp->hasNoInfs())
3589       return true;
3590 
3591   // Handle scalar constants.
3592   if (auto *CFP = dyn_cast<ConstantFP>(V))
3593     return !CFP->isInfinity();
3594 
3595   if (Depth == MaxAnalysisRecursionDepth)
3596     return false;
3597 
3598   if (auto *Inst = dyn_cast<Instruction>(V)) {
3599     switch (Inst->getOpcode()) {
3600     case Instruction::Select: {
3601       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3602              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3603     }
3604     case Instruction::SIToFP:
3605     case Instruction::UIToFP: {
3606       // Get width of largest magnitude integer (remove a bit if signed).
3607       // This still works for a signed minimum value because the largest FP
3608       // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3609       int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3610       if (Inst->getOpcode() == Instruction::SIToFP)
3611         --IntSize;
3612 
3613       // If the exponent of the largest finite FP value can hold the largest
3614       // integer, the result of the cast must be finite.
3615       Type *FPTy = Inst->getType()->getScalarType();
3616       return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3617     }
3618     default:
3619       break;
3620     }
3621   }
3622 
3623   // try to handle fixed width vector constants
3624   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3625   if (VFVTy && isa<Constant>(V)) {
3626     // For vectors, verify that each element is not infinity.
3627     unsigned NumElts = VFVTy->getNumElements();
3628     for (unsigned i = 0; i != NumElts; ++i) {
3629       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3630       if (!Elt)
3631         return false;
3632       if (isa<UndefValue>(Elt))
3633         continue;
3634       auto *CElt = dyn_cast<ConstantFP>(Elt);
3635       if (!CElt || CElt->isInfinity())
3636         return false;
3637     }
3638     // All elements were confirmed non-infinity or undefined.
3639     return true;
3640   }
3641 
3642   // was not able to prove that V never contains infinity
3643   return false;
3644 }
3645 
3646 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3647                            unsigned Depth) {
3648   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3649 
3650   // If we're told that NaNs won't happen, assume they won't.
3651   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3652     if (FPMathOp->hasNoNaNs())
3653       return true;
3654 
3655   // Handle scalar constants.
3656   if (auto *CFP = dyn_cast<ConstantFP>(V))
3657     return !CFP->isNaN();
3658 
3659   if (Depth == MaxAnalysisRecursionDepth)
3660     return false;
3661 
3662   if (auto *Inst = dyn_cast<Instruction>(V)) {
3663     switch (Inst->getOpcode()) {
3664     case Instruction::FAdd:
3665     case Instruction::FSub:
3666       // Adding positive and negative infinity produces NaN.
3667       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3668              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3669              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3670               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3671 
3672     case Instruction::FMul:
3673       // Zero multiplied with infinity produces NaN.
3674       // FIXME: If neither side can be zero fmul never produces NaN.
3675       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3676              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3677              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3678              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3679 
3680     case Instruction::FDiv:
3681     case Instruction::FRem:
3682       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3683       return false;
3684 
3685     case Instruction::Select: {
3686       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3687              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3688     }
3689     case Instruction::SIToFP:
3690     case Instruction::UIToFP:
3691       return true;
3692     case Instruction::FPTrunc:
3693     case Instruction::FPExt:
3694       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3695     default:
3696       break;
3697     }
3698   }
3699 
3700   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3701     switch (II->getIntrinsicID()) {
3702     case Intrinsic::canonicalize:
3703     case Intrinsic::fabs:
3704     case Intrinsic::copysign:
3705     case Intrinsic::exp:
3706     case Intrinsic::exp2:
3707     case Intrinsic::floor:
3708     case Intrinsic::ceil:
3709     case Intrinsic::trunc:
3710     case Intrinsic::rint:
3711     case Intrinsic::nearbyint:
3712     case Intrinsic::round:
3713     case Intrinsic::roundeven:
3714       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3715     case Intrinsic::sqrt:
3716       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3717              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3718     case Intrinsic::minnum:
3719     case Intrinsic::maxnum:
3720       // If either operand is not NaN, the result is not NaN.
3721       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3722              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3723     default:
3724       return false;
3725     }
3726   }
3727 
3728   // Try to handle fixed width vector constants
3729   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3730   if (VFVTy && isa<Constant>(V)) {
3731     // For vectors, verify that each element is not NaN.
3732     unsigned NumElts = VFVTy->getNumElements();
3733     for (unsigned i = 0; i != NumElts; ++i) {
3734       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3735       if (!Elt)
3736         return false;
3737       if (isa<UndefValue>(Elt))
3738         continue;
3739       auto *CElt = dyn_cast<ConstantFP>(Elt);
3740       if (!CElt || CElt->isNaN())
3741         return false;
3742     }
3743     // All elements were confirmed not-NaN or undefined.
3744     return true;
3745   }
3746 
3747   // Was not able to prove that V never contains NaN
3748   return false;
3749 }
3750 
3751 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3752 
3753   // All byte-wide stores are splatable, even of arbitrary variables.
3754   if (V->getType()->isIntegerTy(8))
3755     return V;
3756 
3757   LLVMContext &Ctx = V->getContext();
3758 
3759   // Undef don't care.
3760   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3761   if (isa<UndefValue>(V))
3762     return UndefInt8;
3763 
3764   // Return Undef for zero-sized type.
3765   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3766     return UndefInt8;
3767 
3768   Constant *C = dyn_cast<Constant>(V);
3769   if (!C) {
3770     // Conceptually, we could handle things like:
3771     //   %a = zext i8 %X to i16
3772     //   %b = shl i16 %a, 8
3773     //   %c = or i16 %a, %b
3774     // but until there is an example that actually needs this, it doesn't seem
3775     // worth worrying about.
3776     return nullptr;
3777   }
3778 
3779   // Handle 'null' ConstantArrayZero etc.
3780   if (C->isNullValue())
3781     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3782 
3783   // Constant floating-point values can be handled as integer values if the
3784   // corresponding integer value is "byteable".  An important case is 0.0.
3785   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3786     Type *Ty = nullptr;
3787     if (CFP->getType()->isHalfTy())
3788       Ty = Type::getInt16Ty(Ctx);
3789     else if (CFP->getType()->isFloatTy())
3790       Ty = Type::getInt32Ty(Ctx);
3791     else if (CFP->getType()->isDoubleTy())
3792       Ty = Type::getInt64Ty(Ctx);
3793     // Don't handle long double formats, which have strange constraints.
3794     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3795               : nullptr;
3796   }
3797 
3798   // We can handle constant integers that are multiple of 8 bits.
3799   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3800     if (CI->getBitWidth() % 8 == 0) {
3801       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3802       if (!CI->getValue().isSplat(8))
3803         return nullptr;
3804       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3805     }
3806   }
3807 
3808   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3809     if (CE->getOpcode() == Instruction::IntToPtr) {
3810       if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3811         unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3812         return isBytewiseValue(
3813             ConstantExpr::getIntegerCast(CE->getOperand(0),
3814                                          Type::getIntNTy(Ctx, BitWidth), false),
3815             DL);
3816       }
3817     }
3818   }
3819 
3820   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3821     if (LHS == RHS)
3822       return LHS;
3823     if (!LHS || !RHS)
3824       return nullptr;
3825     if (LHS == UndefInt8)
3826       return RHS;
3827     if (RHS == UndefInt8)
3828       return LHS;
3829     return nullptr;
3830   };
3831 
3832   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3833     Value *Val = UndefInt8;
3834     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3835       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3836         return nullptr;
3837     return Val;
3838   }
3839 
3840   if (isa<ConstantAggregate>(C)) {
3841     Value *Val = UndefInt8;
3842     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3843       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3844         return nullptr;
3845     return Val;
3846   }
3847 
3848   // Don't try to handle the handful of other constants.
3849   return nullptr;
3850 }
3851 
3852 // This is the recursive version of BuildSubAggregate. It takes a few different
3853 // arguments. Idxs is the index within the nested struct From that we are
3854 // looking at now (which is of type IndexedType). IdxSkip is the number of
3855 // indices from Idxs that should be left out when inserting into the resulting
3856 // struct. To is the result struct built so far, new insertvalue instructions
3857 // build on that.
3858 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3859                                 SmallVectorImpl<unsigned> &Idxs,
3860                                 unsigned IdxSkip,
3861                                 Instruction *InsertBefore) {
3862   StructType *STy = dyn_cast<StructType>(IndexedType);
3863   if (STy) {
3864     // Save the original To argument so we can modify it
3865     Value *OrigTo = To;
3866     // General case, the type indexed by Idxs is a struct
3867     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3868       // Process each struct element recursively
3869       Idxs.push_back(i);
3870       Value *PrevTo = To;
3871       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3872                              InsertBefore);
3873       Idxs.pop_back();
3874       if (!To) {
3875         // Couldn't find any inserted value for this index? Cleanup
3876         while (PrevTo != OrigTo) {
3877           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3878           PrevTo = Del->getAggregateOperand();
3879           Del->eraseFromParent();
3880         }
3881         // Stop processing elements
3882         break;
3883       }
3884     }
3885     // If we successfully found a value for each of our subaggregates
3886     if (To)
3887       return To;
3888   }
3889   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3890   // the struct's elements had a value that was inserted directly. In the latter
3891   // case, perhaps we can't determine each of the subelements individually, but
3892   // we might be able to find the complete struct somewhere.
3893 
3894   // Find the value that is at that particular spot
3895   Value *V = FindInsertedValue(From, Idxs);
3896 
3897   if (!V)
3898     return nullptr;
3899 
3900   // Insert the value in the new (sub) aggregate
3901   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3902                                  "tmp", InsertBefore);
3903 }
3904 
3905 // This helper takes a nested struct and extracts a part of it (which is again a
3906 // struct) into a new value. For example, given the struct:
3907 // { a, { b, { c, d }, e } }
3908 // and the indices "1, 1" this returns
3909 // { c, d }.
3910 //
3911 // It does this by inserting an insertvalue for each element in the resulting
3912 // struct, as opposed to just inserting a single struct. This will only work if
3913 // each of the elements of the substruct are known (ie, inserted into From by an
3914 // insertvalue instruction somewhere).
3915 //
3916 // All inserted insertvalue instructions are inserted before InsertBefore
3917 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3918                                 Instruction *InsertBefore) {
3919   assert(InsertBefore && "Must have someplace to insert!");
3920   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3921                                                              idx_range);
3922   Value *To = UndefValue::get(IndexedType);
3923   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3924   unsigned IdxSkip = Idxs.size();
3925 
3926   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3927 }
3928 
3929 /// Given an aggregate and a sequence of indices, see if the scalar value
3930 /// indexed is already around as a register, for example if it was inserted
3931 /// directly into the aggregate.
3932 ///
3933 /// If InsertBefore is not null, this function will duplicate (modified)
3934 /// insertvalues when a part of a nested struct is extracted.
3935 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3936                                Instruction *InsertBefore) {
3937   // Nothing to index? Just return V then (this is useful at the end of our
3938   // recursion).
3939   if (idx_range.empty())
3940     return V;
3941   // We have indices, so V should have an indexable type.
3942   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3943          "Not looking at a struct or array?");
3944   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3945          "Invalid indices for type?");
3946 
3947   if (Constant *C = dyn_cast<Constant>(V)) {
3948     C = C->getAggregateElement(idx_range[0]);
3949     if (!C) return nullptr;
3950     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3951   }
3952 
3953   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3954     // Loop the indices for the insertvalue instruction in parallel with the
3955     // requested indices
3956     const unsigned *req_idx = idx_range.begin();
3957     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3958          i != e; ++i, ++req_idx) {
3959       if (req_idx == idx_range.end()) {
3960         // We can't handle this without inserting insertvalues
3961         if (!InsertBefore)
3962           return nullptr;
3963 
3964         // The requested index identifies a part of a nested aggregate. Handle
3965         // this specially. For example,
3966         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3967         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3968         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3969         // This can be changed into
3970         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3971         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3972         // which allows the unused 0,0 element from the nested struct to be
3973         // removed.
3974         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3975                                  InsertBefore);
3976       }
3977 
3978       // This insert value inserts something else than what we are looking for.
3979       // See if the (aggregate) value inserted into has the value we are
3980       // looking for, then.
3981       if (*req_idx != *i)
3982         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3983                                  InsertBefore);
3984     }
3985     // If we end up here, the indices of the insertvalue match with those
3986     // requested (though possibly only partially). Now we recursively look at
3987     // the inserted value, passing any remaining indices.
3988     return FindInsertedValue(I->getInsertedValueOperand(),
3989                              makeArrayRef(req_idx, idx_range.end()),
3990                              InsertBefore);
3991   }
3992 
3993   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3994     // If we're extracting a value from an aggregate that was extracted from
3995     // something else, we can extract from that something else directly instead.
3996     // However, we will need to chain I's indices with the requested indices.
3997 
3998     // Calculate the number of indices required
3999     unsigned size = I->getNumIndices() + idx_range.size();
4000     // Allocate some space to put the new indices in
4001     SmallVector<unsigned, 5> Idxs;
4002     Idxs.reserve(size);
4003     // Add indices from the extract value instruction
4004     Idxs.append(I->idx_begin(), I->idx_end());
4005 
4006     // Add requested indices
4007     Idxs.append(idx_range.begin(), idx_range.end());
4008 
4009     assert(Idxs.size() == size
4010            && "Number of indices added not correct?");
4011 
4012     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4013   }
4014   // Otherwise, we don't know (such as, extracting from a function return value
4015   // or load instruction)
4016   return nullptr;
4017 }
4018 
4019 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4020                                        unsigned CharSize) {
4021   // Make sure the GEP has exactly three arguments.
4022   if (GEP->getNumOperands() != 3)
4023     return false;
4024 
4025   // Make sure the index-ee is a pointer to array of \p CharSize integers.
4026   // CharSize.
4027   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4028   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4029     return false;
4030 
4031   // Check to make sure that the first operand of the GEP is an integer and
4032   // has value 0 so that we are sure we're indexing into the initializer.
4033   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4034   if (!FirstIdx || !FirstIdx->isZero())
4035     return false;
4036 
4037   return true;
4038 }
4039 
4040 bool llvm::getConstantDataArrayInfo(const Value *V,
4041                                     ConstantDataArraySlice &Slice,
4042                                     unsigned ElementSize, uint64_t Offset) {
4043   assert(V);
4044 
4045   // Look through bitcast instructions and geps.
4046   V = V->stripPointerCasts();
4047 
4048   // If the value is a GEP instruction or constant expression, treat it as an
4049   // offset.
4050   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4051     // The GEP operator should be based on a pointer to string constant, and is
4052     // indexing into the string constant.
4053     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
4054       return false;
4055 
4056     // If the second index isn't a ConstantInt, then this is a variable index
4057     // into the array.  If this occurs, we can't say anything meaningful about
4058     // the string.
4059     uint64_t StartIdx = 0;
4060     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
4061       StartIdx = CI->getZExtValue();
4062     else
4063       return false;
4064     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
4065                                     StartIdx + Offset);
4066   }
4067 
4068   // The GEP instruction, constant or instruction, must reference a global
4069   // variable that is a constant and is initialized. The referenced constant
4070   // initializer is the array that we'll use for optimization.
4071   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
4072   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4073     return false;
4074 
4075   const ConstantDataArray *Array;
4076   ArrayType *ArrayTy;
4077   if (GV->getInitializer()->isNullValue()) {
4078     Type *GVTy = GV->getValueType();
4079     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
4080       // A zeroinitializer for the array; there is no ConstantDataArray.
4081       Array = nullptr;
4082     } else {
4083       const DataLayout &DL = GV->getParent()->getDataLayout();
4084       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4085       uint64_t Length = SizeInBytes / (ElementSize / 8);
4086       if (Length <= Offset)
4087         return false;
4088 
4089       Slice.Array = nullptr;
4090       Slice.Offset = 0;
4091       Slice.Length = Length - Offset;
4092       return true;
4093     }
4094   } else {
4095     // This must be a ConstantDataArray.
4096     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4097     if (!Array)
4098       return false;
4099     ArrayTy = Array->getType();
4100   }
4101   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4102     return false;
4103 
4104   uint64_t NumElts = ArrayTy->getArrayNumElements();
4105   if (Offset > NumElts)
4106     return false;
4107 
4108   Slice.Array = Array;
4109   Slice.Offset = Offset;
4110   Slice.Length = NumElts - Offset;
4111   return true;
4112 }
4113 
4114 /// This function computes the length of a null-terminated C string pointed to
4115 /// by V. If successful, it returns true and returns the string in Str.
4116 /// If unsuccessful, it returns false.
4117 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4118                                  uint64_t Offset, bool TrimAtNul) {
4119   ConstantDataArraySlice Slice;
4120   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4121     return false;
4122 
4123   if (Slice.Array == nullptr) {
4124     if (TrimAtNul) {
4125       Str = StringRef();
4126       return true;
4127     }
4128     if (Slice.Length == 1) {
4129       Str = StringRef("", 1);
4130       return true;
4131     }
4132     // We cannot instantiate a StringRef as we do not have an appropriate string
4133     // of 0s at hand.
4134     return false;
4135   }
4136 
4137   // Start out with the entire array in the StringRef.
4138   Str = Slice.Array->getAsString();
4139   // Skip over 'offset' bytes.
4140   Str = Str.substr(Slice.Offset);
4141 
4142   if (TrimAtNul) {
4143     // Trim off the \0 and anything after it.  If the array is not nul
4144     // terminated, we just return the whole end of string.  The client may know
4145     // some other way that the string is length-bound.
4146     Str = Str.substr(0, Str.find('\0'));
4147   }
4148   return true;
4149 }
4150 
4151 // These next two are very similar to the above, but also look through PHI
4152 // nodes.
4153 // TODO: See if we can integrate these two together.
4154 
4155 /// If we can compute the length of the string pointed to by
4156 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4157 static uint64_t GetStringLengthH(const Value *V,
4158                                  SmallPtrSetImpl<const PHINode*> &PHIs,
4159                                  unsigned CharSize) {
4160   // Look through noop bitcast instructions.
4161   V = V->stripPointerCasts();
4162 
4163   // If this is a PHI node, there are two cases: either we have already seen it
4164   // or we haven't.
4165   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4166     if (!PHIs.insert(PN).second)
4167       return ~0ULL;  // already in the set.
4168 
4169     // If it was new, see if all the input strings are the same length.
4170     uint64_t LenSoFar = ~0ULL;
4171     for (Value *IncValue : PN->incoming_values()) {
4172       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4173       if (Len == 0) return 0; // Unknown length -> unknown.
4174 
4175       if (Len == ~0ULL) continue;
4176 
4177       if (Len != LenSoFar && LenSoFar != ~0ULL)
4178         return 0;    // Disagree -> unknown.
4179       LenSoFar = Len;
4180     }
4181 
4182     // Success, all agree.
4183     return LenSoFar;
4184   }
4185 
4186   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4187   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4188     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4189     if (Len1 == 0) return 0;
4190     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4191     if (Len2 == 0) return 0;
4192     if (Len1 == ~0ULL) return Len2;
4193     if (Len2 == ~0ULL) return Len1;
4194     if (Len1 != Len2) return 0;
4195     return Len1;
4196   }
4197 
4198   // Otherwise, see if we can read the string.
4199   ConstantDataArraySlice Slice;
4200   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4201     return 0;
4202 
4203   if (Slice.Array == nullptr)
4204     return 1;
4205 
4206   // Search for nul characters
4207   unsigned NullIndex = 0;
4208   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4209     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4210       break;
4211   }
4212 
4213   return NullIndex + 1;
4214 }
4215 
4216 /// If we can compute the length of the string pointed to by
4217 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4218 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4219   if (!V->getType()->isPointerTy())
4220     return 0;
4221 
4222   SmallPtrSet<const PHINode*, 32> PHIs;
4223   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4224   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4225   // an empty string as a length.
4226   return Len == ~0ULL ? 1 : Len;
4227 }
4228 
4229 const Value *
4230 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4231                                            bool MustPreserveNullness) {
4232   assert(Call &&
4233          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4234   if (const Value *RV = Call->getReturnedArgOperand())
4235     return RV;
4236   // This can be used only as a aliasing property.
4237   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4238           Call, MustPreserveNullness))
4239     return Call->getArgOperand(0);
4240   return nullptr;
4241 }
4242 
4243 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4244     const CallBase *Call, bool MustPreserveNullness) {
4245   switch (Call->getIntrinsicID()) {
4246   case Intrinsic::launder_invariant_group:
4247   case Intrinsic::strip_invariant_group:
4248   case Intrinsic::aarch64_irg:
4249   case Intrinsic::aarch64_tagp:
4250     return true;
4251   case Intrinsic::ptrmask:
4252     return !MustPreserveNullness;
4253   default:
4254     return false;
4255   }
4256 }
4257 
4258 /// \p PN defines a loop-variant pointer to an object.  Check if the
4259 /// previous iteration of the loop was referring to the same object as \p PN.
4260 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4261                                          const LoopInfo *LI) {
4262   // Find the loop-defined value.
4263   Loop *L = LI->getLoopFor(PN->getParent());
4264   if (PN->getNumIncomingValues() != 2)
4265     return true;
4266 
4267   // Find the value from previous iteration.
4268   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4269   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4270     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4271   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4272     return true;
4273 
4274   // If a new pointer is loaded in the loop, the pointer references a different
4275   // object in every iteration.  E.g.:
4276   //    for (i)
4277   //       int *p = a[i];
4278   //       ...
4279   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4280     if (!L->isLoopInvariant(Load->getPointerOperand()))
4281       return false;
4282   return true;
4283 }
4284 
4285 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4286   if (!V->getType()->isPointerTy())
4287     return V;
4288   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4289     if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4290       V = GEP->getPointerOperand();
4291     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4292                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4293       V = cast<Operator>(V)->getOperand(0);
4294       if (!V->getType()->isPointerTy())
4295         return V;
4296     } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4297       if (GA->isInterposable())
4298         return V;
4299       V = GA->getAliasee();
4300     } else {
4301       if (auto *PHI = dyn_cast<PHINode>(V)) {
4302         // Look through single-arg phi nodes created by LCSSA.
4303         if (PHI->getNumIncomingValues() == 1) {
4304           V = PHI->getIncomingValue(0);
4305           continue;
4306         }
4307       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4308         // CaptureTracking can know about special capturing properties of some
4309         // intrinsics like launder.invariant.group, that can't be expressed with
4310         // the attributes, but have properties like returning aliasing pointer.
4311         // Because some analysis may assume that nocaptured pointer is not
4312         // returned from some special intrinsic (because function would have to
4313         // be marked with returns attribute), it is crucial to use this function
4314         // because it should be in sync with CaptureTracking. Not using it may
4315         // cause weird miscompilations where 2 aliasing pointers are assumed to
4316         // noalias.
4317         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4318           V = RP;
4319           continue;
4320         }
4321       }
4322 
4323       return V;
4324     }
4325     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4326   }
4327   return V;
4328 }
4329 
4330 void llvm::getUnderlyingObjects(const Value *V,
4331                                 SmallVectorImpl<const Value *> &Objects,
4332                                 LoopInfo *LI, unsigned MaxLookup) {
4333   SmallPtrSet<const Value *, 4> Visited;
4334   SmallVector<const Value *, 4> Worklist;
4335   Worklist.push_back(V);
4336   do {
4337     const Value *P = Worklist.pop_back_val();
4338     P = getUnderlyingObject(P, MaxLookup);
4339 
4340     if (!Visited.insert(P).second)
4341       continue;
4342 
4343     if (auto *SI = dyn_cast<SelectInst>(P)) {
4344       Worklist.push_back(SI->getTrueValue());
4345       Worklist.push_back(SI->getFalseValue());
4346       continue;
4347     }
4348 
4349     if (auto *PN = dyn_cast<PHINode>(P)) {
4350       // If this PHI changes the underlying object in every iteration of the
4351       // loop, don't look through it.  Consider:
4352       //   int **A;
4353       //   for (i) {
4354       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4355       //     Curr = A[i];
4356       //     *Prev, *Curr;
4357       //
4358       // Prev is tracking Curr one iteration behind so they refer to different
4359       // underlying objects.
4360       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4361           isSameUnderlyingObjectInLoop(PN, LI))
4362         append_range(Worklist, PN->incoming_values());
4363       continue;
4364     }
4365 
4366     Objects.push_back(P);
4367   } while (!Worklist.empty());
4368 }
4369 
4370 /// This is the function that does the work of looking through basic
4371 /// ptrtoint+arithmetic+inttoptr sequences.
4372 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4373   do {
4374     if (const Operator *U = dyn_cast<Operator>(V)) {
4375       // If we find a ptrtoint, we can transfer control back to the
4376       // regular getUnderlyingObjectFromInt.
4377       if (U->getOpcode() == Instruction::PtrToInt)
4378         return U->getOperand(0);
4379       // If we find an add of a constant, a multiplied value, or a phi, it's
4380       // likely that the other operand will lead us to the base
4381       // object. We don't have to worry about the case where the
4382       // object address is somehow being computed by the multiply,
4383       // because our callers only care when the result is an
4384       // identifiable object.
4385       if (U->getOpcode() != Instruction::Add ||
4386           (!isa<ConstantInt>(U->getOperand(1)) &&
4387            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4388            !isa<PHINode>(U->getOperand(1))))
4389         return V;
4390       V = U->getOperand(0);
4391     } else {
4392       return V;
4393     }
4394     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4395   } while (true);
4396 }
4397 
4398 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4399 /// ptrtoint+arithmetic+inttoptr sequences.
4400 /// It returns false if unidentified object is found in getUnderlyingObjects.
4401 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4402                                           SmallVectorImpl<Value *> &Objects) {
4403   SmallPtrSet<const Value *, 16> Visited;
4404   SmallVector<const Value *, 4> Working(1, V);
4405   do {
4406     V = Working.pop_back_val();
4407 
4408     SmallVector<const Value *, 4> Objs;
4409     getUnderlyingObjects(V, Objs);
4410 
4411     for (const Value *V : Objs) {
4412       if (!Visited.insert(V).second)
4413         continue;
4414       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4415         const Value *O =
4416           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4417         if (O->getType()->isPointerTy()) {
4418           Working.push_back(O);
4419           continue;
4420         }
4421       }
4422       // If getUnderlyingObjects fails to find an identifiable object,
4423       // getUnderlyingObjectsForCodeGen also fails for safety.
4424       if (!isIdentifiedObject(V)) {
4425         Objects.clear();
4426         return false;
4427       }
4428       Objects.push_back(const_cast<Value *>(V));
4429     }
4430   } while (!Working.empty());
4431   return true;
4432 }
4433 
4434 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4435   AllocaInst *Result = nullptr;
4436   SmallPtrSet<Value *, 4> Visited;
4437   SmallVector<Value *, 4> Worklist;
4438 
4439   auto AddWork = [&](Value *V) {
4440     if (Visited.insert(V).second)
4441       Worklist.push_back(V);
4442   };
4443 
4444   AddWork(V);
4445   do {
4446     V = Worklist.pop_back_val();
4447     assert(Visited.count(V));
4448 
4449     if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4450       if (Result && Result != AI)
4451         return nullptr;
4452       Result = AI;
4453     } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4454       AddWork(CI->getOperand(0));
4455     } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4456       for (Value *IncValue : PN->incoming_values())
4457         AddWork(IncValue);
4458     } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4459       AddWork(SI->getTrueValue());
4460       AddWork(SI->getFalseValue());
4461     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4462       if (OffsetZero && !GEP->hasAllZeroIndices())
4463         return nullptr;
4464       AddWork(GEP->getPointerOperand());
4465     } else {
4466       return nullptr;
4467     }
4468   } while (!Worklist.empty());
4469 
4470   return Result;
4471 }
4472 
4473 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4474     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4475   for (const User *U : V->users()) {
4476     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4477     if (!II)
4478       return false;
4479 
4480     if (AllowLifetime && II->isLifetimeStartOrEnd())
4481       continue;
4482 
4483     if (AllowDroppable && II->isDroppable())
4484       continue;
4485 
4486     return false;
4487   }
4488   return true;
4489 }
4490 
4491 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4492   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4493       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4494 }
4495 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4496   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4497       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4498 }
4499 
4500 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4501   if (!LI.isUnordered())
4502     return true;
4503   const Function &F = *LI.getFunction();
4504   // Speculative load may create a race that did not exist in the source.
4505   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4506     // Speculative load may load data from dirty regions.
4507     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4508     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4509 }
4510 
4511 
4512 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4513                                         const Instruction *CtxI,
4514                                         const DominatorTree *DT,
4515                                         const TargetLibraryInfo *TLI) {
4516   const Operator *Inst = dyn_cast<Operator>(V);
4517   if (!Inst)
4518     return false;
4519 
4520   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4521     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4522       if (C->canTrap())
4523         return false;
4524 
4525   switch (Inst->getOpcode()) {
4526   default:
4527     return true;
4528   case Instruction::UDiv:
4529   case Instruction::URem: {
4530     // x / y is undefined if y == 0.
4531     const APInt *V;
4532     if (match(Inst->getOperand(1), m_APInt(V)))
4533       return *V != 0;
4534     return false;
4535   }
4536   case Instruction::SDiv:
4537   case Instruction::SRem: {
4538     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4539     const APInt *Numerator, *Denominator;
4540     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4541       return false;
4542     // We cannot hoist this division if the denominator is 0.
4543     if (*Denominator == 0)
4544       return false;
4545     // It's safe to hoist if the denominator is not 0 or -1.
4546     if (!Denominator->isAllOnesValue())
4547       return true;
4548     // At this point we know that the denominator is -1.  It is safe to hoist as
4549     // long we know that the numerator is not INT_MIN.
4550     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4551       return !Numerator->isMinSignedValue();
4552     // The numerator *might* be MinSignedValue.
4553     return false;
4554   }
4555   case Instruction::Load: {
4556     const LoadInst *LI = cast<LoadInst>(Inst);
4557     if (mustSuppressSpeculation(*LI))
4558       return false;
4559     const DataLayout &DL = LI->getModule()->getDataLayout();
4560     return isDereferenceableAndAlignedPointer(
4561         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4562         DL, CtxI, DT, TLI);
4563   }
4564   case Instruction::Call: {
4565     auto *CI = cast<const CallInst>(Inst);
4566     const Function *Callee = CI->getCalledFunction();
4567 
4568     // The called function could have undefined behavior or side-effects, even
4569     // if marked readnone nounwind.
4570     return Callee && Callee->isSpeculatable();
4571   }
4572   case Instruction::VAArg:
4573   case Instruction::Alloca:
4574   case Instruction::Invoke:
4575   case Instruction::CallBr:
4576   case Instruction::PHI:
4577   case Instruction::Store:
4578   case Instruction::Ret:
4579   case Instruction::Br:
4580   case Instruction::IndirectBr:
4581   case Instruction::Switch:
4582   case Instruction::Unreachable:
4583   case Instruction::Fence:
4584   case Instruction::AtomicRMW:
4585   case Instruction::AtomicCmpXchg:
4586   case Instruction::LandingPad:
4587   case Instruction::Resume:
4588   case Instruction::CatchSwitch:
4589   case Instruction::CatchPad:
4590   case Instruction::CatchRet:
4591   case Instruction::CleanupPad:
4592   case Instruction::CleanupRet:
4593     return false; // Misc instructions which have effects
4594   }
4595 }
4596 
4597 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4598   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4599 }
4600 
4601 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4602 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4603   switch (OR) {
4604     case ConstantRange::OverflowResult::MayOverflow:
4605       return OverflowResult::MayOverflow;
4606     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4607       return OverflowResult::AlwaysOverflowsLow;
4608     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4609       return OverflowResult::AlwaysOverflowsHigh;
4610     case ConstantRange::OverflowResult::NeverOverflows:
4611       return OverflowResult::NeverOverflows;
4612   }
4613   llvm_unreachable("Unknown OverflowResult");
4614 }
4615 
4616 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4617 static ConstantRange computeConstantRangeIncludingKnownBits(
4618     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4619     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4620     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4621   KnownBits Known = computeKnownBits(
4622       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4623   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4624   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4625   ConstantRange::PreferredRangeType RangeType =
4626       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4627   return CR1.intersectWith(CR2, RangeType);
4628 }
4629 
4630 OverflowResult llvm::computeOverflowForUnsignedMul(
4631     const Value *LHS, const Value *RHS, const DataLayout &DL,
4632     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4633     bool UseInstrInfo) {
4634   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4635                                         nullptr, UseInstrInfo);
4636   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4637                                         nullptr, UseInstrInfo);
4638   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4639   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4640   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4641 }
4642 
4643 OverflowResult
4644 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4645                                   const DataLayout &DL, AssumptionCache *AC,
4646                                   const Instruction *CxtI,
4647                                   const DominatorTree *DT, bool UseInstrInfo) {
4648   // Multiplying n * m significant bits yields a result of n + m significant
4649   // bits. If the total number of significant bits does not exceed the
4650   // result bit width (minus 1), there is no overflow.
4651   // This means if we have enough leading sign bits in the operands
4652   // we can guarantee that the result does not overflow.
4653   // Ref: "Hacker's Delight" by Henry Warren
4654   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4655 
4656   // Note that underestimating the number of sign bits gives a more
4657   // conservative answer.
4658   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4659                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4660 
4661   // First handle the easy case: if we have enough sign bits there's
4662   // definitely no overflow.
4663   if (SignBits > BitWidth + 1)
4664     return OverflowResult::NeverOverflows;
4665 
4666   // There are two ambiguous cases where there can be no overflow:
4667   //   SignBits == BitWidth + 1    and
4668   //   SignBits == BitWidth
4669   // The second case is difficult to check, therefore we only handle the
4670   // first case.
4671   if (SignBits == BitWidth + 1) {
4672     // It overflows only when both arguments are negative and the true
4673     // product is exactly the minimum negative number.
4674     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4675     // For simplicity we just check if at least one side is not negative.
4676     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4677                                           nullptr, UseInstrInfo);
4678     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4679                                           nullptr, UseInstrInfo);
4680     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4681       return OverflowResult::NeverOverflows;
4682   }
4683   return OverflowResult::MayOverflow;
4684 }
4685 
4686 OverflowResult llvm::computeOverflowForUnsignedAdd(
4687     const Value *LHS, const Value *RHS, const DataLayout &DL,
4688     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4689     bool UseInstrInfo) {
4690   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4691       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4692       nullptr, UseInstrInfo);
4693   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4694       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4695       nullptr, UseInstrInfo);
4696   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4697 }
4698 
4699 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4700                                                   const Value *RHS,
4701                                                   const AddOperator *Add,
4702                                                   const DataLayout &DL,
4703                                                   AssumptionCache *AC,
4704                                                   const Instruction *CxtI,
4705                                                   const DominatorTree *DT) {
4706   if (Add && Add->hasNoSignedWrap()) {
4707     return OverflowResult::NeverOverflows;
4708   }
4709 
4710   // If LHS and RHS each have at least two sign bits, the addition will look
4711   // like
4712   //
4713   // XX..... +
4714   // YY.....
4715   //
4716   // If the carry into the most significant position is 0, X and Y can't both
4717   // be 1 and therefore the carry out of the addition is also 0.
4718   //
4719   // If the carry into the most significant position is 1, X and Y can't both
4720   // be 0 and therefore the carry out of the addition is also 1.
4721   //
4722   // Since the carry into the most significant position is always equal to
4723   // the carry out of the addition, there is no signed overflow.
4724   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4725       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4726     return OverflowResult::NeverOverflows;
4727 
4728   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4729       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4730   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4731       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4732   OverflowResult OR =
4733       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4734   if (OR != OverflowResult::MayOverflow)
4735     return OR;
4736 
4737   // The remaining code needs Add to be available. Early returns if not so.
4738   if (!Add)
4739     return OverflowResult::MayOverflow;
4740 
4741   // If the sign of Add is the same as at least one of the operands, this add
4742   // CANNOT overflow. If this can be determined from the known bits of the
4743   // operands the above signedAddMayOverflow() check will have already done so.
4744   // The only other way to improve on the known bits is from an assumption, so
4745   // call computeKnownBitsFromAssume() directly.
4746   bool LHSOrRHSKnownNonNegative =
4747       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4748   bool LHSOrRHSKnownNegative =
4749       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4750   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4751     KnownBits AddKnown(LHSRange.getBitWidth());
4752     computeKnownBitsFromAssume(
4753         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4754     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4755         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4756       return OverflowResult::NeverOverflows;
4757   }
4758 
4759   return OverflowResult::MayOverflow;
4760 }
4761 
4762 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4763                                                    const Value *RHS,
4764                                                    const DataLayout &DL,
4765                                                    AssumptionCache *AC,
4766                                                    const Instruction *CxtI,
4767                                                    const DominatorTree *DT) {
4768   // Checking for conditions implied by dominating conditions may be expensive.
4769   // Limit it to usub_with_overflow calls for now.
4770   if (match(CxtI,
4771             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4772     if (auto C =
4773             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4774       if (*C)
4775         return OverflowResult::NeverOverflows;
4776       return OverflowResult::AlwaysOverflowsLow;
4777     }
4778   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4779       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4780   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4781       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4782   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4783 }
4784 
4785 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4786                                                  const Value *RHS,
4787                                                  const DataLayout &DL,
4788                                                  AssumptionCache *AC,
4789                                                  const Instruction *CxtI,
4790                                                  const DominatorTree *DT) {
4791   // If LHS and RHS each have at least two sign bits, the subtraction
4792   // cannot overflow.
4793   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4794       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4795     return OverflowResult::NeverOverflows;
4796 
4797   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4798       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4799   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4800       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4801   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4802 }
4803 
4804 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4805                                      const DominatorTree &DT) {
4806   SmallVector<const BranchInst *, 2> GuardingBranches;
4807   SmallVector<const ExtractValueInst *, 2> Results;
4808 
4809   for (const User *U : WO->users()) {
4810     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4811       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4812 
4813       if (EVI->getIndices()[0] == 0)
4814         Results.push_back(EVI);
4815       else {
4816         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4817 
4818         for (const auto *U : EVI->users())
4819           if (const auto *B = dyn_cast<BranchInst>(U)) {
4820             assert(B->isConditional() && "How else is it using an i1?");
4821             GuardingBranches.push_back(B);
4822           }
4823       }
4824     } else {
4825       // We are using the aggregate directly in a way we don't want to analyze
4826       // here (storing it to a global, say).
4827       return false;
4828     }
4829   }
4830 
4831   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4832     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4833     if (!NoWrapEdge.isSingleEdge())
4834       return false;
4835 
4836     // Check if all users of the add are provably no-wrap.
4837     for (const auto *Result : Results) {
4838       // If the extractvalue itself is not executed on overflow, the we don't
4839       // need to check each use separately, since domination is transitive.
4840       if (DT.dominates(NoWrapEdge, Result->getParent()))
4841         continue;
4842 
4843       for (auto &RU : Result->uses())
4844         if (!DT.dominates(NoWrapEdge, RU))
4845           return false;
4846     }
4847 
4848     return true;
4849   };
4850 
4851   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4852 }
4853 
4854 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4855   // See whether I has flags that may create poison
4856   if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4857     if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4858       return true;
4859   }
4860   if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4861     if (ExactOp->isExact())
4862       return true;
4863   if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4864     auto FMF = FP->getFastMathFlags();
4865     if (FMF.noNaNs() || FMF.noInfs())
4866       return true;
4867   }
4868 
4869   unsigned Opcode = Op->getOpcode();
4870 
4871   // Check whether opcode is a poison/undef-generating operation
4872   switch (Opcode) {
4873   case Instruction::Shl:
4874   case Instruction::AShr:
4875   case Instruction::LShr: {
4876     // Shifts return poison if shiftwidth is larger than the bitwidth.
4877     if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4878       SmallVector<Constant *, 4> ShiftAmounts;
4879       if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4880         unsigned NumElts = FVTy->getNumElements();
4881         for (unsigned i = 0; i < NumElts; ++i)
4882           ShiftAmounts.push_back(C->getAggregateElement(i));
4883       } else if (isa<ScalableVectorType>(C->getType()))
4884         return true; // Can't tell, just return true to be safe
4885       else
4886         ShiftAmounts.push_back(C);
4887 
4888       bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4889         auto *CI = dyn_cast_or_null<ConstantInt>(C);
4890         return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4891       });
4892       return !Safe;
4893     }
4894     return true;
4895   }
4896   case Instruction::FPToSI:
4897   case Instruction::FPToUI:
4898     // fptosi/ui yields poison if the resulting value does not fit in the
4899     // destination type.
4900     return true;
4901   case Instruction::Call:
4902     if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
4903       switch (II->getIntrinsicID()) {
4904       // TODO: Add more intrinsics.
4905       case Intrinsic::ctpop:
4906       case Intrinsic::sadd_with_overflow:
4907       case Intrinsic::ssub_with_overflow:
4908       case Intrinsic::smul_with_overflow:
4909       case Intrinsic::uadd_with_overflow:
4910       case Intrinsic::usub_with_overflow:
4911       case Intrinsic::umul_with_overflow:
4912         return false;
4913       }
4914     }
4915     LLVM_FALLTHROUGH;
4916   case Instruction::CallBr:
4917   case Instruction::Invoke: {
4918     const auto *CB = cast<CallBase>(Op);
4919     return !CB->hasRetAttr(Attribute::NoUndef);
4920   }
4921   case Instruction::InsertElement:
4922   case Instruction::ExtractElement: {
4923     // If index exceeds the length of the vector, it returns poison
4924     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4925     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4926     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4927     if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
4928       return true;
4929     return false;
4930   }
4931   case Instruction::ShuffleVector: {
4932     // shufflevector may return undef.
4933     if (PoisonOnly)
4934       return false;
4935     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4936                              ? cast<ConstantExpr>(Op)->getShuffleMask()
4937                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4938     return is_contained(Mask, UndefMaskElem);
4939   }
4940   case Instruction::FNeg:
4941   case Instruction::PHI:
4942   case Instruction::Select:
4943   case Instruction::URem:
4944   case Instruction::SRem:
4945   case Instruction::ExtractValue:
4946   case Instruction::InsertValue:
4947   case Instruction::Freeze:
4948   case Instruction::ICmp:
4949   case Instruction::FCmp:
4950     return false;
4951   case Instruction::GetElementPtr: {
4952     const auto *GEP = cast<GEPOperator>(Op);
4953     return GEP->isInBounds();
4954   }
4955   default: {
4956     const auto *CE = dyn_cast<ConstantExpr>(Op);
4957     if (isa<CastInst>(Op) || (CE && CE->isCast()))
4958       return false;
4959     else if (Instruction::isBinaryOp(Opcode))
4960       return false;
4961     // Be conservative and return true.
4962     return true;
4963   }
4964   }
4965 }
4966 
4967 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
4968   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
4969 }
4970 
4971 bool llvm::canCreatePoison(const Operator *Op) {
4972   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
4973 }
4974 
4975 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
4976                                   const Value *V, unsigned Depth) {
4977   if (ValAssumedPoison == V)
4978     return true;
4979 
4980   const unsigned MaxDepth = 2;
4981   if (Depth >= MaxDepth)
4982     return false;
4983 
4984   if (const auto *I = dyn_cast<Instruction>(V)) {
4985     if (propagatesPoison(cast<Operator>(I)))
4986       return any_of(I->operands(), [=](const Value *Op) {
4987         return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
4988       });
4989 
4990     // 'select ValAssumedPoison, _, _' is poison.
4991     if (const auto *SI = dyn_cast<SelectInst>(I))
4992       return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
4993                                    Depth + 1);
4994     // V  = extractvalue V0, idx
4995     // V2 = extractvalue V0, idx2
4996     // V0's elements are all poison or not. (e.g., add_with_overflow)
4997     const WithOverflowInst *II;
4998     if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
4999         match(ValAssumedPoison, m_ExtractValue(m_Specific(II))))
5000       return true;
5001   }
5002   return false;
5003 }
5004 
5005 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
5006                           unsigned Depth) {
5007   if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
5008     return true;
5009 
5010   if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
5011     return true;
5012 
5013   const unsigned MaxDepth = 2;
5014   if (Depth >= MaxDepth)
5015     return false;
5016 
5017   const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
5018   if (I && !canCreatePoison(cast<Operator>(I))) {
5019     return all_of(I->operands(), [=](const Value *Op) {
5020       return impliesPoison(Op, V, Depth + 1);
5021     });
5022   }
5023   return false;
5024 }
5025 
5026 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
5027   return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
5028 }
5029 
5030 static bool programUndefinedIfUndefOrPoison(const Value *V,
5031                                             bool PoisonOnly);
5032 
5033 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
5034                                              AssumptionCache *AC,
5035                                              const Instruction *CtxI,
5036                                              const DominatorTree *DT,
5037                                              unsigned Depth, bool PoisonOnly) {
5038   if (Depth >= MaxAnalysisRecursionDepth)
5039     return false;
5040 
5041   if (isa<MetadataAsValue>(V))
5042     return false;
5043 
5044   if (const auto *A = dyn_cast<Argument>(V)) {
5045     if (A->hasAttribute(Attribute::NoUndef))
5046       return true;
5047   }
5048 
5049   if (auto *C = dyn_cast<Constant>(V)) {
5050     if (isa<UndefValue>(C))
5051       return PoisonOnly && !isa<PoisonValue>(C);
5052 
5053     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
5054         isa<ConstantPointerNull>(C) || isa<Function>(C))
5055       return true;
5056 
5057     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
5058       return (PoisonOnly ? !C->containsPoisonElement()
5059                          : !C->containsUndefOrPoisonElement()) &&
5060              !C->containsConstantExpression();
5061   }
5062 
5063   // Strip cast operations from a pointer value.
5064   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
5065   // inbounds with zero offset. To guarantee that the result isn't poison, the
5066   // stripped pointer is checked as it has to be pointing into an allocated
5067   // object or be null `null` to ensure `inbounds` getelement pointers with a
5068   // zero offset could not produce poison.
5069   // It can strip off addrspacecast that do not change bit representation as
5070   // well. We believe that such addrspacecast is equivalent to no-op.
5071   auto *StrippedV = V->stripPointerCastsSameRepresentation();
5072   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5073       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5074     return true;
5075 
5076   auto OpCheck = [&](const Value *V) {
5077     return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5078                                             PoisonOnly);
5079   };
5080 
5081   if (auto *Opr = dyn_cast<Operator>(V)) {
5082     // If the value is a freeze instruction, then it can never
5083     // be undef or poison.
5084     if (isa<FreezeInst>(V))
5085       return true;
5086 
5087     if (const auto *CB = dyn_cast<CallBase>(V)) {
5088       if (CB->hasRetAttr(Attribute::NoUndef))
5089         return true;
5090     }
5091 
5092     if (const auto *PN = dyn_cast<PHINode>(V)) {
5093       unsigned Num = PN->getNumIncomingValues();
5094       bool IsWellDefined = true;
5095       for (unsigned i = 0; i < Num; ++i) {
5096         auto *TI = PN->getIncomingBlock(i)->getTerminator();
5097         if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5098                                               DT, Depth + 1, PoisonOnly)) {
5099           IsWellDefined = false;
5100           break;
5101         }
5102       }
5103       if (IsWellDefined)
5104         return true;
5105     } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5106       return true;
5107   }
5108 
5109   if (auto *I = dyn_cast<LoadInst>(V))
5110     if (I->getMetadata(LLVMContext::MD_noundef))
5111       return true;
5112 
5113   if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5114     return true;
5115 
5116   // CxtI may be null or a cloned instruction.
5117   if (!CtxI || !CtxI->getParent() || !DT)
5118     return false;
5119 
5120   auto *DNode = DT->getNode(CtxI->getParent());
5121   if (!DNode)
5122     // Unreachable block
5123     return false;
5124 
5125   // If V is used as a branch condition before reaching CtxI, V cannot be
5126   // undef or poison.
5127   //   br V, BB1, BB2
5128   // BB1:
5129   //   CtxI ; V cannot be undef or poison here
5130   auto *Dominator = DNode->getIDom();
5131   while (Dominator) {
5132     auto *TI = Dominator->getBlock()->getTerminator();
5133 
5134     Value *Cond = nullptr;
5135     if (auto BI = dyn_cast<BranchInst>(TI)) {
5136       if (BI->isConditional())
5137         Cond = BI->getCondition();
5138     } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
5139       Cond = SI->getCondition();
5140     }
5141 
5142     if (Cond) {
5143       if (Cond == V)
5144         return true;
5145       else if (PoisonOnly && isa<Operator>(Cond)) {
5146         // For poison, we can analyze further
5147         auto *Opr = cast<Operator>(Cond);
5148         if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5149           return true;
5150       }
5151     }
5152 
5153     Dominator = Dominator->getIDom();
5154   }
5155 
5156   SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
5157   if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
5158     return true;
5159 
5160   return false;
5161 }
5162 
5163 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5164                                             const Instruction *CtxI,
5165                                             const DominatorTree *DT,
5166                                             unsigned Depth) {
5167   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5168 }
5169 
5170 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5171                                      const Instruction *CtxI,
5172                                      const DominatorTree *DT, unsigned Depth) {
5173   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5174 }
5175 
5176 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5177                                                  const DataLayout &DL,
5178                                                  AssumptionCache *AC,
5179                                                  const Instruction *CxtI,
5180                                                  const DominatorTree *DT) {
5181   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5182                                        Add, DL, AC, CxtI, DT);
5183 }
5184 
5185 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5186                                                  const Value *RHS,
5187                                                  const DataLayout &DL,
5188                                                  AssumptionCache *AC,
5189                                                  const Instruction *CxtI,
5190                                                  const DominatorTree *DT) {
5191   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5192 }
5193 
5194 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5195   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5196   // of time because it's possible for another thread to interfere with it for an
5197   // arbitrary length of time, but programs aren't allowed to rely on that.
5198 
5199   // If there is no successor, then execution can't transfer to it.
5200   if (isa<ReturnInst>(I))
5201     return false;
5202   if (isa<UnreachableInst>(I))
5203     return false;
5204 
5205   // An instruction that returns without throwing must transfer control flow
5206   // to a successor.
5207   return !I->mayThrow() && I->willReturn();
5208 }
5209 
5210 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5211   // TODO: This is slightly conservative for invoke instruction since exiting
5212   // via an exception *is* normal control for them.
5213   for (const Instruction &I : *BB)
5214     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5215       return false;
5216   return true;
5217 }
5218 
5219 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5220                                                   const Loop *L) {
5221   // The loop header is guaranteed to be executed for every iteration.
5222   //
5223   // FIXME: Relax this constraint to cover all basic blocks that are
5224   // guaranteed to be executed at every iteration.
5225   if (I->getParent() != L->getHeader()) return false;
5226 
5227   for (const Instruction &LI : *L->getHeader()) {
5228     if (&LI == I) return true;
5229     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5230   }
5231   llvm_unreachable("Instruction not contained in its own parent basic block.");
5232 }
5233 
5234 bool llvm::propagatesPoison(const Operator *I) {
5235   switch (I->getOpcode()) {
5236   case Instruction::Freeze:
5237   case Instruction::Select:
5238   case Instruction::PHI:
5239   case Instruction::Invoke:
5240     return false;
5241   case Instruction::Call:
5242     if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5243       switch (II->getIntrinsicID()) {
5244       // TODO: Add more intrinsics.
5245       case Intrinsic::sadd_with_overflow:
5246       case Intrinsic::ssub_with_overflow:
5247       case Intrinsic::smul_with_overflow:
5248       case Intrinsic::uadd_with_overflow:
5249       case Intrinsic::usub_with_overflow:
5250       case Intrinsic::umul_with_overflow:
5251         // If an input is a vector containing a poison element, the
5252         // two output vectors (calculated results, overflow bits)'
5253         // corresponding lanes are poison.
5254         return true;
5255       }
5256     }
5257     return false;
5258   case Instruction::ICmp:
5259   case Instruction::FCmp:
5260   case Instruction::GetElementPtr:
5261     return true;
5262   default:
5263     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5264       return true;
5265 
5266     // Be conservative and return false.
5267     return false;
5268   }
5269 }
5270 
5271 void llvm::getGuaranteedWellDefinedOps(
5272     const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5273   switch (I->getOpcode()) {
5274     case Instruction::Store:
5275       Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5276       break;
5277 
5278     case Instruction::Load:
5279       Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5280       break;
5281 
5282     // Since dereferenceable attribute imply noundef, atomic operations
5283     // also implicitly have noundef pointers too
5284     case Instruction::AtomicCmpXchg:
5285       Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5286       break;
5287 
5288     case Instruction::AtomicRMW:
5289       Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5290       break;
5291 
5292     case Instruction::Call:
5293     case Instruction::Invoke: {
5294       const CallBase *CB = cast<CallBase>(I);
5295       if (CB->isIndirectCall())
5296         Operands.insert(CB->getCalledOperand());
5297       for (unsigned i = 0; i < CB->arg_size(); ++i) {
5298         if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5299             CB->paramHasAttr(i, Attribute::Dereferenceable))
5300           Operands.insert(CB->getArgOperand(i));
5301       }
5302       break;
5303     }
5304 
5305     default:
5306       break;
5307   }
5308 }
5309 
5310 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5311                                      SmallPtrSetImpl<const Value *> &Operands) {
5312   getGuaranteedWellDefinedOps(I, Operands);
5313   switch (I->getOpcode()) {
5314   // Divisors of these operations are allowed to be partially undef.
5315   case Instruction::UDiv:
5316   case Instruction::SDiv:
5317   case Instruction::URem:
5318   case Instruction::SRem:
5319     Operands.insert(I->getOperand(1));
5320     break;
5321 
5322   default:
5323     break;
5324   }
5325 }
5326 
5327 bool llvm::mustTriggerUB(const Instruction *I,
5328                          const SmallSet<const Value *, 16>& KnownPoison) {
5329   SmallPtrSet<const Value *, 4> NonPoisonOps;
5330   getGuaranteedNonPoisonOps(I, NonPoisonOps);
5331 
5332   for (const auto *V : NonPoisonOps)
5333     if (KnownPoison.count(V))
5334       return true;
5335 
5336   return false;
5337 }
5338 
5339 static bool programUndefinedIfUndefOrPoison(const Value *V,
5340                                             bool PoisonOnly) {
5341   // We currently only look for uses of values within the same basic
5342   // block, as that makes it easier to guarantee that the uses will be
5343   // executed given that Inst is executed.
5344   //
5345   // FIXME: Expand this to consider uses beyond the same basic block. To do
5346   // this, look out for the distinction between post-dominance and strong
5347   // post-dominance.
5348   const BasicBlock *BB = nullptr;
5349   BasicBlock::const_iterator Begin;
5350   if (const auto *Inst = dyn_cast<Instruction>(V)) {
5351     BB = Inst->getParent();
5352     Begin = Inst->getIterator();
5353     Begin++;
5354   } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5355     BB = &Arg->getParent()->getEntryBlock();
5356     Begin = BB->begin();
5357   } else {
5358     return false;
5359   }
5360 
5361   BasicBlock::const_iterator End = BB->end();
5362 
5363   if (!PoisonOnly) {
5364     // Since undef does not propagate eagerly, be conservative & just check
5365     // whether a value is directly passed to an instruction that must take
5366     // well-defined operands.
5367 
5368     for (auto &I : make_range(Begin, End)) {
5369       SmallPtrSet<const Value *, 4> WellDefinedOps;
5370       getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5371       for (auto *Op : WellDefinedOps) {
5372         if (Op == V)
5373           return true;
5374       }
5375       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5376         break;
5377     }
5378     return false;
5379   }
5380 
5381   // Set of instructions that we have proved will yield poison if Inst
5382   // does.
5383   SmallSet<const Value *, 16> YieldsPoison;
5384   SmallSet<const BasicBlock *, 4> Visited;
5385 
5386   YieldsPoison.insert(V);
5387   auto Propagate = [&](const User *User) {
5388     if (propagatesPoison(cast<Operator>(User)))
5389       YieldsPoison.insert(User);
5390   };
5391   for_each(V->users(), Propagate);
5392   Visited.insert(BB);
5393 
5394   unsigned Iter = 0;
5395   while (Iter++ < MaxAnalysisRecursionDepth) {
5396     for (auto &I : make_range(Begin, End)) {
5397       if (mustTriggerUB(&I, YieldsPoison))
5398         return true;
5399       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5400         return false;
5401 
5402       // Mark poison that propagates from I through uses of I.
5403       if (YieldsPoison.count(&I))
5404         for_each(I.users(), Propagate);
5405     }
5406 
5407     if (auto *NextBB = BB->getSingleSuccessor()) {
5408       if (Visited.insert(NextBB).second) {
5409         BB = NextBB;
5410         Begin = BB->getFirstNonPHI()->getIterator();
5411         End = BB->end();
5412         continue;
5413       }
5414     }
5415 
5416     break;
5417   }
5418   return false;
5419 }
5420 
5421 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5422   return ::programUndefinedIfUndefOrPoison(Inst, false);
5423 }
5424 
5425 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5426   return ::programUndefinedIfUndefOrPoison(Inst, true);
5427 }
5428 
5429 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5430   if (FMF.noNaNs())
5431     return true;
5432 
5433   if (auto *C = dyn_cast<ConstantFP>(V))
5434     return !C->isNaN();
5435 
5436   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5437     if (!C->getElementType()->isFloatingPointTy())
5438       return false;
5439     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5440       if (C->getElementAsAPFloat(I).isNaN())
5441         return false;
5442     }
5443     return true;
5444   }
5445 
5446   if (isa<ConstantAggregateZero>(V))
5447     return true;
5448 
5449   return false;
5450 }
5451 
5452 static bool isKnownNonZero(const Value *V) {
5453   if (auto *C = dyn_cast<ConstantFP>(V))
5454     return !C->isZero();
5455 
5456   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5457     if (!C->getElementType()->isFloatingPointTy())
5458       return false;
5459     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5460       if (C->getElementAsAPFloat(I).isZero())
5461         return false;
5462     }
5463     return true;
5464   }
5465 
5466   return false;
5467 }
5468 
5469 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5470 /// Given non-min/max outer cmp/select from the clamp pattern this
5471 /// function recognizes if it can be substitued by a "canonical" min/max
5472 /// pattern.
5473 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5474                                                Value *CmpLHS, Value *CmpRHS,
5475                                                Value *TrueVal, Value *FalseVal,
5476                                                Value *&LHS, Value *&RHS) {
5477   // Try to match
5478   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5479   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5480   // and return description of the outer Max/Min.
5481 
5482   // First, check if select has inverse order:
5483   if (CmpRHS == FalseVal) {
5484     std::swap(TrueVal, FalseVal);
5485     Pred = CmpInst::getInversePredicate(Pred);
5486   }
5487 
5488   // Assume success now. If there's no match, callers should not use these anyway.
5489   LHS = TrueVal;
5490   RHS = FalseVal;
5491 
5492   const APFloat *FC1;
5493   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5494     return {SPF_UNKNOWN, SPNB_NA, false};
5495 
5496   const APFloat *FC2;
5497   switch (Pred) {
5498   case CmpInst::FCMP_OLT:
5499   case CmpInst::FCMP_OLE:
5500   case CmpInst::FCMP_ULT:
5501   case CmpInst::FCMP_ULE:
5502     if (match(FalseVal,
5503               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5504                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5505         *FC1 < *FC2)
5506       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5507     break;
5508   case CmpInst::FCMP_OGT:
5509   case CmpInst::FCMP_OGE:
5510   case CmpInst::FCMP_UGT:
5511   case CmpInst::FCMP_UGE:
5512     if (match(FalseVal,
5513               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5514                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5515         *FC1 > *FC2)
5516       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5517     break;
5518   default:
5519     break;
5520   }
5521 
5522   return {SPF_UNKNOWN, SPNB_NA, false};
5523 }
5524 
5525 /// Recognize variations of:
5526 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5527 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5528                                       Value *CmpLHS, Value *CmpRHS,
5529                                       Value *TrueVal, Value *FalseVal) {
5530   // Swap the select operands and predicate to match the patterns below.
5531   if (CmpRHS != TrueVal) {
5532     Pred = ICmpInst::getSwappedPredicate(Pred);
5533     std::swap(TrueVal, FalseVal);
5534   }
5535   const APInt *C1;
5536   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5537     const APInt *C2;
5538     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5539     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5540         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5541       return {SPF_SMAX, SPNB_NA, false};
5542 
5543     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5544     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5545         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5546       return {SPF_SMIN, SPNB_NA, false};
5547 
5548     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5549     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5550         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5551       return {SPF_UMAX, SPNB_NA, false};
5552 
5553     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5554     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5555         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5556       return {SPF_UMIN, SPNB_NA, false};
5557   }
5558   return {SPF_UNKNOWN, SPNB_NA, false};
5559 }
5560 
5561 /// Recognize variations of:
5562 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5563 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5564                                                Value *CmpLHS, Value *CmpRHS,
5565                                                Value *TVal, Value *FVal,
5566                                                unsigned Depth) {
5567   // TODO: Allow FP min/max with nnan/nsz.
5568   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5569 
5570   Value *A = nullptr, *B = nullptr;
5571   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5572   if (!SelectPatternResult::isMinOrMax(L.Flavor))
5573     return {SPF_UNKNOWN, SPNB_NA, false};
5574 
5575   Value *C = nullptr, *D = nullptr;
5576   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5577   if (L.Flavor != R.Flavor)
5578     return {SPF_UNKNOWN, SPNB_NA, false};
5579 
5580   // We have something like: x Pred y ? min(a, b) : min(c, d).
5581   // Try to match the compare to the min/max operations of the select operands.
5582   // First, make sure we have the right compare predicate.
5583   switch (L.Flavor) {
5584   case SPF_SMIN:
5585     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5586       Pred = ICmpInst::getSwappedPredicate(Pred);
5587       std::swap(CmpLHS, CmpRHS);
5588     }
5589     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5590       break;
5591     return {SPF_UNKNOWN, SPNB_NA, false};
5592   case SPF_SMAX:
5593     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5594       Pred = ICmpInst::getSwappedPredicate(Pred);
5595       std::swap(CmpLHS, CmpRHS);
5596     }
5597     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5598       break;
5599     return {SPF_UNKNOWN, SPNB_NA, false};
5600   case SPF_UMIN:
5601     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5602       Pred = ICmpInst::getSwappedPredicate(Pred);
5603       std::swap(CmpLHS, CmpRHS);
5604     }
5605     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5606       break;
5607     return {SPF_UNKNOWN, SPNB_NA, false};
5608   case SPF_UMAX:
5609     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5610       Pred = ICmpInst::getSwappedPredicate(Pred);
5611       std::swap(CmpLHS, CmpRHS);
5612     }
5613     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5614       break;
5615     return {SPF_UNKNOWN, SPNB_NA, false};
5616   default:
5617     return {SPF_UNKNOWN, SPNB_NA, false};
5618   }
5619 
5620   // If there is a common operand in the already matched min/max and the other
5621   // min/max operands match the compare operands (either directly or inverted),
5622   // then this is min/max of the same flavor.
5623 
5624   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5625   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5626   if (D == B) {
5627     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5628                                          match(A, m_Not(m_Specific(CmpRHS)))))
5629       return {L.Flavor, SPNB_NA, false};
5630   }
5631   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5632   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5633   if (C == B) {
5634     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5635                                          match(A, m_Not(m_Specific(CmpRHS)))))
5636       return {L.Flavor, SPNB_NA, false};
5637   }
5638   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5639   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5640   if (D == A) {
5641     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5642                                          match(B, m_Not(m_Specific(CmpRHS)))))
5643       return {L.Flavor, SPNB_NA, false};
5644   }
5645   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5646   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5647   if (C == A) {
5648     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5649                                          match(B, m_Not(m_Specific(CmpRHS)))))
5650       return {L.Flavor, SPNB_NA, false};
5651   }
5652 
5653   return {SPF_UNKNOWN, SPNB_NA, false};
5654 }
5655 
5656 /// If the input value is the result of a 'not' op, constant integer, or vector
5657 /// splat of a constant integer, return the bitwise-not source value.
5658 /// TODO: This could be extended to handle non-splat vector integer constants.
5659 static Value *getNotValue(Value *V) {
5660   Value *NotV;
5661   if (match(V, m_Not(m_Value(NotV))))
5662     return NotV;
5663 
5664   const APInt *C;
5665   if (match(V, m_APInt(C)))
5666     return ConstantInt::get(V->getType(), ~(*C));
5667 
5668   return nullptr;
5669 }
5670 
5671 /// Match non-obvious integer minimum and maximum sequences.
5672 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5673                                        Value *CmpLHS, Value *CmpRHS,
5674                                        Value *TrueVal, Value *FalseVal,
5675                                        Value *&LHS, Value *&RHS,
5676                                        unsigned Depth) {
5677   // Assume success. If there's no match, callers should not use these anyway.
5678   LHS = TrueVal;
5679   RHS = FalseVal;
5680 
5681   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5682   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5683     return SPR;
5684 
5685   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5686   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5687     return SPR;
5688 
5689   // Look through 'not' ops to find disguised min/max.
5690   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5691   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5692   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5693     switch (Pred) {
5694     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5695     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5696     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5697     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5698     default: break;
5699     }
5700   }
5701 
5702   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5703   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5704   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5705     switch (Pred) {
5706     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5707     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5708     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5709     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5710     default: break;
5711     }
5712   }
5713 
5714   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5715     return {SPF_UNKNOWN, SPNB_NA, false};
5716 
5717   // Z = X -nsw Y
5718   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5719   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5720   if (match(TrueVal, m_Zero()) &&
5721       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5722     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5723 
5724   // Z = X -nsw Y
5725   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5726   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5727   if (match(FalseVal, m_Zero()) &&
5728       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5729     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5730 
5731   const APInt *C1;
5732   if (!match(CmpRHS, m_APInt(C1)))
5733     return {SPF_UNKNOWN, SPNB_NA, false};
5734 
5735   // An unsigned min/max can be written with a signed compare.
5736   const APInt *C2;
5737   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5738       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5739     // Is the sign bit set?
5740     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5741     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5742     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5743         C2->isMaxSignedValue())
5744       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5745 
5746     // Is the sign bit clear?
5747     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5748     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5749     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5750         C2->isMinSignedValue())
5751       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5752   }
5753 
5754   return {SPF_UNKNOWN, SPNB_NA, false};
5755 }
5756 
5757 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5758   assert(X && Y && "Invalid operand");
5759 
5760   // X = sub (0, Y) || X = sub nsw (0, Y)
5761   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5762       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5763     return true;
5764 
5765   // Y = sub (0, X) || Y = sub nsw (0, X)
5766   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5767       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5768     return true;
5769 
5770   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5771   Value *A, *B;
5772   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5773                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5774          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5775                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5776 }
5777 
5778 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5779                                               FastMathFlags FMF,
5780                                               Value *CmpLHS, Value *CmpRHS,
5781                                               Value *TrueVal, Value *FalseVal,
5782                                               Value *&LHS, Value *&RHS,
5783                                               unsigned Depth) {
5784   if (CmpInst::isFPPredicate(Pred)) {
5785     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5786     // 0.0 operand, set the compare's 0.0 operands to that same value for the
5787     // purpose of identifying min/max. Disregard vector constants with undefined
5788     // elements because those can not be back-propagated for analysis.
5789     Value *OutputZeroVal = nullptr;
5790     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5791         !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
5792       OutputZeroVal = TrueVal;
5793     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5794              !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
5795       OutputZeroVal = FalseVal;
5796 
5797     if (OutputZeroVal) {
5798       if (match(CmpLHS, m_AnyZeroFP()))
5799         CmpLHS = OutputZeroVal;
5800       if (match(CmpRHS, m_AnyZeroFP()))
5801         CmpRHS = OutputZeroVal;
5802     }
5803   }
5804 
5805   LHS = CmpLHS;
5806   RHS = CmpRHS;
5807 
5808   // Signed zero may return inconsistent results between implementations.
5809   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5810   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5811   // Therefore, we behave conservatively and only proceed if at least one of the
5812   // operands is known to not be zero or if we don't care about signed zero.
5813   switch (Pred) {
5814   default: break;
5815   // FIXME: Include OGT/OLT/UGT/ULT.
5816   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5817   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5818     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5819         !isKnownNonZero(CmpRHS))
5820       return {SPF_UNKNOWN, SPNB_NA, false};
5821   }
5822 
5823   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5824   bool Ordered = false;
5825 
5826   // When given one NaN and one non-NaN input:
5827   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5828   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5829   //     ordered comparison fails), which could be NaN or non-NaN.
5830   // so here we discover exactly what NaN behavior is required/accepted.
5831   if (CmpInst::isFPPredicate(Pred)) {
5832     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5833     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5834 
5835     if (LHSSafe && RHSSafe) {
5836       // Both operands are known non-NaN.
5837       NaNBehavior = SPNB_RETURNS_ANY;
5838     } else if (CmpInst::isOrdered(Pred)) {
5839       // An ordered comparison will return false when given a NaN, so it
5840       // returns the RHS.
5841       Ordered = true;
5842       if (LHSSafe)
5843         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5844         NaNBehavior = SPNB_RETURNS_NAN;
5845       else if (RHSSafe)
5846         NaNBehavior = SPNB_RETURNS_OTHER;
5847       else
5848         // Completely unsafe.
5849         return {SPF_UNKNOWN, SPNB_NA, false};
5850     } else {
5851       Ordered = false;
5852       // An unordered comparison will return true when given a NaN, so it
5853       // returns the LHS.
5854       if (LHSSafe)
5855         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5856         NaNBehavior = SPNB_RETURNS_OTHER;
5857       else if (RHSSafe)
5858         NaNBehavior = SPNB_RETURNS_NAN;
5859       else
5860         // Completely unsafe.
5861         return {SPF_UNKNOWN, SPNB_NA, false};
5862     }
5863   }
5864 
5865   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5866     std::swap(CmpLHS, CmpRHS);
5867     Pred = CmpInst::getSwappedPredicate(Pred);
5868     if (NaNBehavior == SPNB_RETURNS_NAN)
5869       NaNBehavior = SPNB_RETURNS_OTHER;
5870     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5871       NaNBehavior = SPNB_RETURNS_NAN;
5872     Ordered = !Ordered;
5873   }
5874 
5875   // ([if]cmp X, Y) ? X : Y
5876   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5877     switch (Pred) {
5878     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5879     case ICmpInst::ICMP_UGT:
5880     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5881     case ICmpInst::ICMP_SGT:
5882     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5883     case ICmpInst::ICMP_ULT:
5884     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5885     case ICmpInst::ICMP_SLT:
5886     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5887     case FCmpInst::FCMP_UGT:
5888     case FCmpInst::FCMP_UGE:
5889     case FCmpInst::FCMP_OGT:
5890     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5891     case FCmpInst::FCMP_ULT:
5892     case FCmpInst::FCMP_ULE:
5893     case FCmpInst::FCMP_OLT:
5894     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5895     }
5896   }
5897 
5898   if (isKnownNegation(TrueVal, FalseVal)) {
5899     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5900     // match against either LHS or sext(LHS).
5901     auto MaybeSExtCmpLHS =
5902         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5903     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5904     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5905     if (match(TrueVal, MaybeSExtCmpLHS)) {
5906       // Set the return values. If the compare uses the negated value (-X >s 0),
5907       // swap the return values because the negated value is always 'RHS'.
5908       LHS = TrueVal;
5909       RHS = FalseVal;
5910       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5911         std::swap(LHS, RHS);
5912 
5913       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5914       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5915       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5916         return {SPF_ABS, SPNB_NA, false};
5917 
5918       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5919       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5920         return {SPF_ABS, SPNB_NA, false};
5921 
5922       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5923       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5924       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5925         return {SPF_NABS, SPNB_NA, false};
5926     }
5927     else if (match(FalseVal, MaybeSExtCmpLHS)) {
5928       // Set the return values. If the compare uses the negated value (-X >s 0),
5929       // swap the return values because the negated value is always 'RHS'.
5930       LHS = FalseVal;
5931       RHS = TrueVal;
5932       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5933         std::swap(LHS, RHS);
5934 
5935       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5936       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5937       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5938         return {SPF_NABS, SPNB_NA, false};
5939 
5940       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5941       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5942       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5943         return {SPF_ABS, SPNB_NA, false};
5944     }
5945   }
5946 
5947   if (CmpInst::isIntPredicate(Pred))
5948     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5949 
5950   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5951   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5952   // semantics than minNum. Be conservative in such case.
5953   if (NaNBehavior != SPNB_RETURNS_ANY ||
5954       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5955        !isKnownNonZero(CmpRHS)))
5956     return {SPF_UNKNOWN, SPNB_NA, false};
5957 
5958   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5959 }
5960 
5961 /// Helps to match a select pattern in case of a type mismatch.
5962 ///
5963 /// The function processes the case when type of true and false values of a
5964 /// select instruction differs from type of the cmp instruction operands because
5965 /// of a cast instruction. The function checks if it is legal to move the cast
5966 /// operation after "select". If yes, it returns the new second value of
5967 /// "select" (with the assumption that cast is moved):
5968 /// 1. As operand of cast instruction when both values of "select" are same cast
5969 /// instructions.
5970 /// 2. As restored constant (by applying reverse cast operation) when the first
5971 /// value of the "select" is a cast operation and the second value is a
5972 /// constant.
5973 /// NOTE: We return only the new second value because the first value could be
5974 /// accessed as operand of cast instruction.
5975 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5976                               Instruction::CastOps *CastOp) {
5977   auto *Cast1 = dyn_cast<CastInst>(V1);
5978   if (!Cast1)
5979     return nullptr;
5980 
5981   *CastOp = Cast1->getOpcode();
5982   Type *SrcTy = Cast1->getSrcTy();
5983   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5984     // If V1 and V2 are both the same cast from the same type, look through V1.
5985     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5986       return Cast2->getOperand(0);
5987     return nullptr;
5988   }
5989 
5990   auto *C = dyn_cast<Constant>(V2);
5991   if (!C)
5992     return nullptr;
5993 
5994   Constant *CastedTo = nullptr;
5995   switch (*CastOp) {
5996   case Instruction::ZExt:
5997     if (CmpI->isUnsigned())
5998       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5999     break;
6000   case Instruction::SExt:
6001     if (CmpI->isSigned())
6002       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6003     break;
6004   case Instruction::Trunc:
6005     Constant *CmpConst;
6006     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6007         CmpConst->getType() == SrcTy) {
6008       // Here we have the following case:
6009       //
6010       //   %cond = cmp iN %x, CmpConst
6011       //   %tr = trunc iN %x to iK
6012       //   %narrowsel = select i1 %cond, iK %t, iK C
6013       //
6014       // We can always move trunc after select operation:
6015       //
6016       //   %cond = cmp iN %x, CmpConst
6017       //   %widesel = select i1 %cond, iN %x, iN CmpConst
6018       //   %tr = trunc iN %widesel to iK
6019       //
6020       // Note that C could be extended in any way because we don't care about
6021       // upper bits after truncation. It can't be abs pattern, because it would
6022       // look like:
6023       //
6024       //   select i1 %cond, x, -x.
6025       //
6026       // So only min/max pattern could be matched. Such match requires widened C
6027       // == CmpConst. That is why set widened C = CmpConst, condition trunc
6028       // CmpConst == C is checked below.
6029       CastedTo = CmpConst;
6030     } else {
6031       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6032     }
6033     break;
6034   case Instruction::FPTrunc:
6035     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6036     break;
6037   case Instruction::FPExt:
6038     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6039     break;
6040   case Instruction::FPToUI:
6041     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6042     break;
6043   case Instruction::FPToSI:
6044     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6045     break;
6046   case Instruction::UIToFP:
6047     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6048     break;
6049   case Instruction::SIToFP:
6050     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6051     break;
6052   default:
6053     break;
6054   }
6055 
6056   if (!CastedTo)
6057     return nullptr;
6058 
6059   // Make sure the cast doesn't lose any information.
6060   Constant *CastedBack =
6061       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
6062   if (CastedBack != C)
6063     return nullptr;
6064 
6065   return CastedTo;
6066 }
6067 
6068 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
6069                                              Instruction::CastOps *CastOp,
6070                                              unsigned Depth) {
6071   if (Depth >= MaxAnalysisRecursionDepth)
6072     return {SPF_UNKNOWN, SPNB_NA, false};
6073 
6074   SelectInst *SI = dyn_cast<SelectInst>(V);
6075   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
6076 
6077   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
6078   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
6079 
6080   Value *TrueVal = SI->getTrueValue();
6081   Value *FalseVal = SI->getFalseValue();
6082 
6083   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
6084                                             CastOp, Depth);
6085 }
6086 
6087 SelectPatternResult llvm::matchDecomposedSelectPattern(
6088     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
6089     Instruction::CastOps *CastOp, unsigned Depth) {
6090   CmpInst::Predicate Pred = CmpI->getPredicate();
6091   Value *CmpLHS = CmpI->getOperand(0);
6092   Value *CmpRHS = CmpI->getOperand(1);
6093   FastMathFlags FMF;
6094   if (isa<FPMathOperator>(CmpI))
6095     FMF = CmpI->getFastMathFlags();
6096 
6097   // Bail out early.
6098   if (CmpI->isEquality())
6099     return {SPF_UNKNOWN, SPNB_NA, false};
6100 
6101   // Deal with type mismatches.
6102   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
6103     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
6104       // If this is a potential fmin/fmax with a cast to integer, then ignore
6105       // -0.0 because there is no corresponding integer value.
6106       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6107         FMF.setNoSignedZeros();
6108       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6109                                   cast<CastInst>(TrueVal)->getOperand(0), C,
6110                                   LHS, RHS, Depth);
6111     }
6112     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
6113       // If this is a potential fmin/fmax with a cast to integer, then ignore
6114       // -0.0 because there is no corresponding integer value.
6115       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6116         FMF.setNoSignedZeros();
6117       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6118                                   C, cast<CastInst>(FalseVal)->getOperand(0),
6119                                   LHS, RHS, Depth);
6120     }
6121   }
6122   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6123                               LHS, RHS, Depth);
6124 }
6125 
6126 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6127   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6128   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6129   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6130   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6131   if (SPF == SPF_FMINNUM)
6132     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6133   if (SPF == SPF_FMAXNUM)
6134     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6135   llvm_unreachable("unhandled!");
6136 }
6137 
6138 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6139   if (SPF == SPF_SMIN) return SPF_SMAX;
6140   if (SPF == SPF_UMIN) return SPF_UMAX;
6141   if (SPF == SPF_SMAX) return SPF_SMIN;
6142   if (SPF == SPF_UMAX) return SPF_UMIN;
6143   llvm_unreachable("unhandled!");
6144 }
6145 
6146 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6147   switch (MinMaxID) {
6148   case Intrinsic::smax: return Intrinsic::smin;
6149   case Intrinsic::smin: return Intrinsic::smax;
6150   case Intrinsic::umax: return Intrinsic::umin;
6151   case Intrinsic::umin: return Intrinsic::umax;
6152   default: llvm_unreachable("Unexpected intrinsic");
6153   }
6154 }
6155 
6156 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6157   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6158 }
6159 
6160 std::pair<Intrinsic::ID, bool>
6161 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6162   // Check if VL contains select instructions that can be folded into a min/max
6163   // vector intrinsic and return the intrinsic if it is possible.
6164   // TODO: Support floating point min/max.
6165   bool AllCmpSingleUse = true;
6166   SelectPatternResult SelectPattern;
6167   SelectPattern.Flavor = SPF_UNKNOWN;
6168   if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6169         Value *LHS, *RHS;
6170         auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6171         if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6172             CurrentPattern.Flavor == SPF_FMINNUM ||
6173             CurrentPattern.Flavor == SPF_FMAXNUM ||
6174             !I->getType()->isIntOrIntVectorTy())
6175           return false;
6176         if (SelectPattern.Flavor != SPF_UNKNOWN &&
6177             SelectPattern.Flavor != CurrentPattern.Flavor)
6178           return false;
6179         SelectPattern = CurrentPattern;
6180         AllCmpSingleUse &=
6181             match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6182         return true;
6183       })) {
6184     switch (SelectPattern.Flavor) {
6185     case SPF_SMIN:
6186       return {Intrinsic::smin, AllCmpSingleUse};
6187     case SPF_UMIN:
6188       return {Intrinsic::umin, AllCmpSingleUse};
6189     case SPF_SMAX:
6190       return {Intrinsic::smax, AllCmpSingleUse};
6191     case SPF_UMAX:
6192       return {Intrinsic::umax, AllCmpSingleUse};
6193     default:
6194       llvm_unreachable("unexpected select pattern flavor");
6195     }
6196   }
6197   return {Intrinsic::not_intrinsic, false};
6198 }
6199 
6200 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6201                                  Value *&Start, Value *&Step) {
6202   // Handle the case of a simple two-predecessor recurrence PHI.
6203   // There's a lot more that could theoretically be done here, but
6204   // this is sufficient to catch some interesting cases.
6205   if (P->getNumIncomingValues() != 2)
6206     return false;
6207 
6208   for (unsigned i = 0; i != 2; ++i) {
6209     Value *L = P->getIncomingValue(i);
6210     Value *R = P->getIncomingValue(!i);
6211     Operator *LU = dyn_cast<Operator>(L);
6212     if (!LU)
6213       continue;
6214     unsigned Opcode = LU->getOpcode();
6215 
6216     switch (Opcode) {
6217     default:
6218       continue;
6219     // TODO: Expand list -- xor, div, gep, uaddo, etc..
6220     case Instruction::LShr:
6221     case Instruction::AShr:
6222     case Instruction::Shl:
6223     case Instruction::Add:
6224     case Instruction::Sub:
6225     case Instruction::And:
6226     case Instruction::Or:
6227     case Instruction::Mul: {
6228       Value *LL = LU->getOperand(0);
6229       Value *LR = LU->getOperand(1);
6230       // Find a recurrence.
6231       if (LL == P)
6232         L = LR;
6233       else if (LR == P)
6234         L = LL;
6235       else
6236         continue; // Check for recurrence with L and R flipped.
6237 
6238       break; // Match!
6239     }
6240     };
6241 
6242     // We have matched a recurrence of the form:
6243     //   %iv = [R, %entry], [%iv.next, %backedge]
6244     //   %iv.next = binop %iv, L
6245     // OR
6246     //   %iv = [R, %entry], [%iv.next, %backedge]
6247     //   %iv.next = binop L, %iv
6248     BO = cast<BinaryOperator>(LU);
6249     Start = R;
6250     Step = L;
6251     return true;
6252   }
6253   return false;
6254 }
6255 
6256 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6257                                  Value *&Start, Value *&Step) {
6258   BinaryOperator *BO = nullptr;
6259   P = dyn_cast<PHINode>(I->getOperand(0));
6260   if (!P)
6261     P = dyn_cast<PHINode>(I->getOperand(1));
6262   return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6263 }
6264 
6265 /// Return true if "icmp Pred LHS RHS" is always true.
6266 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6267                             const Value *RHS, const DataLayout &DL,
6268                             unsigned Depth) {
6269   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6270   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6271     return true;
6272 
6273   switch (Pred) {
6274   default:
6275     return false;
6276 
6277   case CmpInst::ICMP_SLE: {
6278     const APInt *C;
6279 
6280     // LHS s<= LHS +_{nsw} C   if C >= 0
6281     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6282       return !C->isNegative();
6283     return false;
6284   }
6285 
6286   case CmpInst::ICMP_ULE: {
6287     const APInt *C;
6288 
6289     // LHS u<= LHS +_{nuw} C   for any C
6290     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6291       return true;
6292 
6293     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6294     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6295                                        const Value *&X,
6296                                        const APInt *&CA, const APInt *&CB) {
6297       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6298           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6299         return true;
6300 
6301       // If X & C == 0 then (X | C) == X +_{nuw} C
6302       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6303           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6304         KnownBits Known(CA->getBitWidth());
6305         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6306                          /*CxtI*/ nullptr, /*DT*/ nullptr);
6307         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6308           return true;
6309       }
6310 
6311       return false;
6312     };
6313 
6314     const Value *X;
6315     const APInt *CLHS, *CRHS;
6316     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6317       return CLHS->ule(*CRHS);
6318 
6319     return false;
6320   }
6321   }
6322 }
6323 
6324 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6325 /// ALHS ARHS" is true.  Otherwise, return None.
6326 static Optional<bool>
6327 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6328                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
6329                       const DataLayout &DL, unsigned Depth) {
6330   switch (Pred) {
6331   default:
6332     return None;
6333 
6334   case CmpInst::ICMP_SLT:
6335   case CmpInst::ICMP_SLE:
6336     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6337         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6338       return true;
6339     return None;
6340 
6341   case CmpInst::ICMP_ULT:
6342   case CmpInst::ICMP_ULE:
6343     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6344         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6345       return true;
6346     return None;
6347   }
6348 }
6349 
6350 /// Return true if the operands of the two compares match.  IsSwappedOps is true
6351 /// when the operands match, but are swapped.
6352 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6353                           const Value *BLHS, const Value *BRHS,
6354                           bool &IsSwappedOps) {
6355 
6356   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6357   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6358   return IsMatchingOps || IsSwappedOps;
6359 }
6360 
6361 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6362 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6363 /// Otherwise, return None if we can't infer anything.
6364 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6365                                                     CmpInst::Predicate BPred,
6366                                                     bool AreSwappedOps) {
6367   // Canonicalize the predicate as if the operands were not commuted.
6368   if (AreSwappedOps)
6369     BPred = ICmpInst::getSwappedPredicate(BPred);
6370 
6371   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6372     return true;
6373   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6374     return false;
6375 
6376   return None;
6377 }
6378 
6379 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6380 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6381 /// Otherwise, return None if we can't infer anything.
6382 static Optional<bool>
6383 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6384                                  const ConstantInt *C1,
6385                                  CmpInst::Predicate BPred,
6386                                  const ConstantInt *C2) {
6387   ConstantRange DomCR =
6388       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6389   ConstantRange CR =
6390       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6391   ConstantRange Intersection = DomCR.intersectWith(CR);
6392   ConstantRange Difference = DomCR.difference(CR);
6393   if (Intersection.isEmptySet())
6394     return false;
6395   if (Difference.isEmptySet())
6396     return true;
6397   return None;
6398 }
6399 
6400 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6401 /// false.  Otherwise, return None if we can't infer anything.
6402 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6403                                          CmpInst::Predicate BPred,
6404                                          const Value *BLHS, const Value *BRHS,
6405                                          const DataLayout &DL, bool LHSIsTrue,
6406                                          unsigned Depth) {
6407   Value *ALHS = LHS->getOperand(0);
6408   Value *ARHS = LHS->getOperand(1);
6409 
6410   // The rest of the logic assumes the LHS condition is true.  If that's not the
6411   // case, invert the predicate to make it so.
6412   CmpInst::Predicate APred =
6413       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6414 
6415   // Can we infer anything when the two compares have matching operands?
6416   bool AreSwappedOps;
6417   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6418     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6419             APred, BPred, AreSwappedOps))
6420       return Implication;
6421     // No amount of additional analysis will infer the second condition, so
6422     // early exit.
6423     return None;
6424   }
6425 
6426   // Can we infer anything when the LHS operands match and the RHS operands are
6427   // constants (not necessarily matching)?
6428   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6429     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6430             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6431       return Implication;
6432     // No amount of additional analysis will infer the second condition, so
6433     // early exit.
6434     return None;
6435   }
6436 
6437   if (APred == BPred)
6438     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6439   return None;
6440 }
6441 
6442 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6443 /// false.  Otherwise, return None if we can't infer anything.  We expect the
6444 /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6445 static Optional<bool>
6446 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6447                    const Value *RHSOp0, const Value *RHSOp1,
6448                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6449   // The LHS must be an 'or', 'and', or a 'select' instruction.
6450   assert((LHS->getOpcode() == Instruction::And ||
6451           LHS->getOpcode() == Instruction::Or ||
6452           LHS->getOpcode() == Instruction::Select) &&
6453          "Expected LHS to be 'and', 'or', or 'select'.");
6454 
6455   assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6456 
6457   // If the result of an 'or' is false, then we know both legs of the 'or' are
6458   // false.  Similarly, if the result of an 'and' is true, then we know both
6459   // legs of the 'and' are true.
6460   const Value *ALHS, *ARHS;
6461   if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6462       (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6463     // FIXME: Make this non-recursion.
6464     if (Optional<bool> Implication = isImpliedCondition(
6465             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6466       return Implication;
6467     if (Optional<bool> Implication = isImpliedCondition(
6468             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6469       return Implication;
6470     return None;
6471   }
6472   return None;
6473 }
6474 
6475 Optional<bool>
6476 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6477                          const Value *RHSOp0, const Value *RHSOp1,
6478                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6479   // Bail out when we hit the limit.
6480   if (Depth == MaxAnalysisRecursionDepth)
6481     return None;
6482 
6483   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6484   // example.
6485   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6486     return None;
6487 
6488   Type *OpTy = LHS->getType();
6489   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6490 
6491   // FIXME: Extending the code below to handle vectors.
6492   if (OpTy->isVectorTy())
6493     return None;
6494 
6495   assert(OpTy->isIntegerTy(1) && "implied by above");
6496 
6497   // Both LHS and RHS are icmps.
6498   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6499   if (LHSCmp)
6500     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6501                               Depth);
6502 
6503   /// The LHS should be an 'or', 'and', or a 'select' instruction.  We expect
6504   /// the RHS to be an icmp.
6505   /// FIXME: Add support for and/or/select on the RHS.
6506   if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6507     if ((LHSI->getOpcode() == Instruction::And ||
6508          LHSI->getOpcode() == Instruction::Or ||
6509          LHSI->getOpcode() == Instruction::Select))
6510       return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6511                                 Depth);
6512   }
6513   return None;
6514 }
6515 
6516 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6517                                         const DataLayout &DL, bool LHSIsTrue,
6518                                         unsigned Depth) {
6519   // LHS ==> RHS by definition
6520   if (LHS == RHS)
6521     return LHSIsTrue;
6522 
6523   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6524   if (RHSCmp)
6525     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6526                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6527                               LHSIsTrue, Depth);
6528   return None;
6529 }
6530 
6531 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6532 // condition dominating ContextI or nullptr, if no condition is found.
6533 static std::pair<Value *, bool>
6534 getDomPredecessorCondition(const Instruction *ContextI) {
6535   if (!ContextI || !ContextI->getParent())
6536     return {nullptr, false};
6537 
6538   // TODO: This is a poor/cheap way to determine dominance. Should we use a
6539   // dominator tree (eg, from a SimplifyQuery) instead?
6540   const BasicBlock *ContextBB = ContextI->getParent();
6541   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6542   if (!PredBB)
6543     return {nullptr, false};
6544 
6545   // We need a conditional branch in the predecessor.
6546   Value *PredCond;
6547   BasicBlock *TrueBB, *FalseBB;
6548   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6549     return {nullptr, false};
6550 
6551   // The branch should get simplified. Don't bother simplifying this condition.
6552   if (TrueBB == FalseBB)
6553     return {nullptr, false};
6554 
6555   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6556          "Predecessor block does not point to successor?");
6557 
6558   // Is this condition implied by the predecessor condition?
6559   return {PredCond, TrueBB == ContextBB};
6560 }
6561 
6562 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6563                                              const Instruction *ContextI,
6564                                              const DataLayout &DL) {
6565   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6566   auto PredCond = getDomPredecessorCondition(ContextI);
6567   if (PredCond.first)
6568     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6569   return None;
6570 }
6571 
6572 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6573                                              const Value *LHS, const Value *RHS,
6574                                              const Instruction *ContextI,
6575                                              const DataLayout &DL) {
6576   auto PredCond = getDomPredecessorCondition(ContextI);
6577   if (PredCond.first)
6578     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6579                               PredCond.second);
6580   return None;
6581 }
6582 
6583 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6584                               APInt &Upper, const InstrInfoQuery &IIQ) {
6585   unsigned Width = Lower.getBitWidth();
6586   const APInt *C;
6587   switch (BO.getOpcode()) {
6588   case Instruction::Add:
6589     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6590       // FIXME: If we have both nuw and nsw, we should reduce the range further.
6591       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6592         // 'add nuw x, C' produces [C, UINT_MAX].
6593         Lower = *C;
6594       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6595         if (C->isNegative()) {
6596           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6597           Lower = APInt::getSignedMinValue(Width);
6598           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6599         } else {
6600           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6601           Lower = APInt::getSignedMinValue(Width) + *C;
6602           Upper = APInt::getSignedMaxValue(Width) + 1;
6603         }
6604       }
6605     }
6606     break;
6607 
6608   case Instruction::And:
6609     if (match(BO.getOperand(1), m_APInt(C)))
6610       // 'and x, C' produces [0, C].
6611       Upper = *C + 1;
6612     break;
6613 
6614   case Instruction::Or:
6615     if (match(BO.getOperand(1), m_APInt(C)))
6616       // 'or x, C' produces [C, UINT_MAX].
6617       Lower = *C;
6618     break;
6619 
6620   case Instruction::AShr:
6621     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6622       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6623       Lower = APInt::getSignedMinValue(Width).ashr(*C);
6624       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6625     } else if (match(BO.getOperand(0), m_APInt(C))) {
6626       unsigned ShiftAmount = Width - 1;
6627       if (!C->isNullValue() && IIQ.isExact(&BO))
6628         ShiftAmount = C->countTrailingZeros();
6629       if (C->isNegative()) {
6630         // 'ashr C, x' produces [C, C >> (Width-1)]
6631         Lower = *C;
6632         Upper = C->ashr(ShiftAmount) + 1;
6633       } else {
6634         // 'ashr C, x' produces [C >> (Width-1), C]
6635         Lower = C->ashr(ShiftAmount);
6636         Upper = *C + 1;
6637       }
6638     }
6639     break;
6640 
6641   case Instruction::LShr:
6642     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6643       // 'lshr x, C' produces [0, UINT_MAX >> C].
6644       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6645     } else if (match(BO.getOperand(0), m_APInt(C))) {
6646       // 'lshr C, x' produces [C >> (Width-1), C].
6647       unsigned ShiftAmount = Width - 1;
6648       if (!C->isNullValue() && IIQ.isExact(&BO))
6649         ShiftAmount = C->countTrailingZeros();
6650       Lower = C->lshr(ShiftAmount);
6651       Upper = *C + 1;
6652     }
6653     break;
6654 
6655   case Instruction::Shl:
6656     if (match(BO.getOperand(0), m_APInt(C))) {
6657       if (IIQ.hasNoUnsignedWrap(&BO)) {
6658         // 'shl nuw C, x' produces [C, C << CLZ(C)]
6659         Lower = *C;
6660         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6661       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6662         if (C->isNegative()) {
6663           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6664           unsigned ShiftAmount = C->countLeadingOnes() - 1;
6665           Lower = C->shl(ShiftAmount);
6666           Upper = *C + 1;
6667         } else {
6668           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6669           unsigned ShiftAmount = C->countLeadingZeros() - 1;
6670           Lower = *C;
6671           Upper = C->shl(ShiftAmount) + 1;
6672         }
6673       }
6674     }
6675     break;
6676 
6677   case Instruction::SDiv:
6678     if (match(BO.getOperand(1), m_APInt(C))) {
6679       APInt IntMin = APInt::getSignedMinValue(Width);
6680       APInt IntMax = APInt::getSignedMaxValue(Width);
6681       if (C->isAllOnesValue()) {
6682         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6683         //    where C != -1 and C != 0 and C != 1
6684         Lower = IntMin + 1;
6685         Upper = IntMax + 1;
6686       } else if (C->countLeadingZeros() < Width - 1) {
6687         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6688         //    where C != -1 and C != 0 and C != 1
6689         Lower = IntMin.sdiv(*C);
6690         Upper = IntMax.sdiv(*C);
6691         if (Lower.sgt(Upper))
6692           std::swap(Lower, Upper);
6693         Upper = Upper + 1;
6694         assert(Upper != Lower && "Upper part of range has wrapped!");
6695       }
6696     } else if (match(BO.getOperand(0), m_APInt(C))) {
6697       if (C->isMinSignedValue()) {
6698         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6699         Lower = *C;
6700         Upper = Lower.lshr(1) + 1;
6701       } else {
6702         // 'sdiv C, x' produces [-|C|, |C|].
6703         Upper = C->abs() + 1;
6704         Lower = (-Upper) + 1;
6705       }
6706     }
6707     break;
6708 
6709   case Instruction::UDiv:
6710     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6711       // 'udiv x, C' produces [0, UINT_MAX / C].
6712       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6713     } else if (match(BO.getOperand(0), m_APInt(C))) {
6714       // 'udiv C, x' produces [0, C].
6715       Upper = *C + 1;
6716     }
6717     break;
6718 
6719   case Instruction::SRem:
6720     if (match(BO.getOperand(1), m_APInt(C))) {
6721       // 'srem x, C' produces (-|C|, |C|).
6722       Upper = C->abs();
6723       Lower = (-Upper) + 1;
6724     }
6725     break;
6726 
6727   case Instruction::URem:
6728     if (match(BO.getOperand(1), m_APInt(C)))
6729       // 'urem x, C' produces [0, C).
6730       Upper = *C;
6731     break;
6732 
6733   default:
6734     break;
6735   }
6736 }
6737 
6738 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6739                                   APInt &Upper) {
6740   unsigned Width = Lower.getBitWidth();
6741   const APInt *C;
6742   switch (II.getIntrinsicID()) {
6743   case Intrinsic::ctpop:
6744   case Intrinsic::ctlz:
6745   case Intrinsic::cttz:
6746     // Maximum of set/clear bits is the bit width.
6747     assert(Lower == 0 && "Expected lower bound to be zero");
6748     Upper = Width + 1;
6749     break;
6750   case Intrinsic::uadd_sat:
6751     // uadd.sat(x, C) produces [C, UINT_MAX].
6752     if (match(II.getOperand(0), m_APInt(C)) ||
6753         match(II.getOperand(1), m_APInt(C)))
6754       Lower = *C;
6755     break;
6756   case Intrinsic::sadd_sat:
6757     if (match(II.getOperand(0), m_APInt(C)) ||
6758         match(II.getOperand(1), m_APInt(C))) {
6759       if (C->isNegative()) {
6760         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6761         Lower = APInt::getSignedMinValue(Width);
6762         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6763       } else {
6764         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6765         Lower = APInt::getSignedMinValue(Width) + *C;
6766         Upper = APInt::getSignedMaxValue(Width) + 1;
6767       }
6768     }
6769     break;
6770   case Intrinsic::usub_sat:
6771     // usub.sat(C, x) produces [0, C].
6772     if (match(II.getOperand(0), m_APInt(C)))
6773       Upper = *C + 1;
6774     // usub.sat(x, C) produces [0, UINT_MAX - C].
6775     else if (match(II.getOperand(1), m_APInt(C)))
6776       Upper = APInt::getMaxValue(Width) - *C + 1;
6777     break;
6778   case Intrinsic::ssub_sat:
6779     if (match(II.getOperand(0), m_APInt(C))) {
6780       if (C->isNegative()) {
6781         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6782         Lower = APInt::getSignedMinValue(Width);
6783         Upper = *C - APInt::getSignedMinValue(Width) + 1;
6784       } else {
6785         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6786         Lower = *C - APInt::getSignedMaxValue(Width);
6787         Upper = APInt::getSignedMaxValue(Width) + 1;
6788       }
6789     } else if (match(II.getOperand(1), m_APInt(C))) {
6790       if (C->isNegative()) {
6791         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6792         Lower = APInt::getSignedMinValue(Width) - *C;
6793         Upper = APInt::getSignedMaxValue(Width) + 1;
6794       } else {
6795         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6796         Lower = APInt::getSignedMinValue(Width);
6797         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6798       }
6799     }
6800     break;
6801   case Intrinsic::umin:
6802   case Intrinsic::umax:
6803   case Intrinsic::smin:
6804   case Intrinsic::smax:
6805     if (!match(II.getOperand(0), m_APInt(C)) &&
6806         !match(II.getOperand(1), m_APInt(C)))
6807       break;
6808 
6809     switch (II.getIntrinsicID()) {
6810     case Intrinsic::umin:
6811       Upper = *C + 1;
6812       break;
6813     case Intrinsic::umax:
6814       Lower = *C;
6815       break;
6816     case Intrinsic::smin:
6817       Lower = APInt::getSignedMinValue(Width);
6818       Upper = *C + 1;
6819       break;
6820     case Intrinsic::smax:
6821       Lower = *C;
6822       Upper = APInt::getSignedMaxValue(Width) + 1;
6823       break;
6824     default:
6825       llvm_unreachable("Must be min/max intrinsic");
6826     }
6827     break;
6828   case Intrinsic::abs:
6829     // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6830     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6831     if (match(II.getOperand(1), m_One()))
6832       Upper = APInt::getSignedMaxValue(Width) + 1;
6833     else
6834       Upper = APInt::getSignedMinValue(Width) + 1;
6835     break;
6836   default:
6837     break;
6838   }
6839 }
6840 
6841 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6842                                       APInt &Upper, const InstrInfoQuery &IIQ) {
6843   const Value *LHS = nullptr, *RHS = nullptr;
6844   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6845   if (R.Flavor == SPF_UNKNOWN)
6846     return;
6847 
6848   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6849 
6850   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6851     // If the negation part of the abs (in RHS) has the NSW flag,
6852     // then the result of abs(X) is [0..SIGNED_MAX],
6853     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6854     Lower = APInt::getNullValue(BitWidth);
6855     if (match(RHS, m_Neg(m_Specific(LHS))) &&
6856         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6857       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6858     else
6859       Upper = APInt::getSignedMinValue(BitWidth) + 1;
6860     return;
6861   }
6862 
6863   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6864     // The result of -abs(X) is <= 0.
6865     Lower = APInt::getSignedMinValue(BitWidth);
6866     Upper = APInt(BitWidth, 1);
6867     return;
6868   }
6869 
6870   const APInt *C;
6871   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6872     return;
6873 
6874   switch (R.Flavor) {
6875     case SPF_UMIN:
6876       Upper = *C + 1;
6877       break;
6878     case SPF_UMAX:
6879       Lower = *C;
6880       break;
6881     case SPF_SMIN:
6882       Lower = APInt::getSignedMinValue(BitWidth);
6883       Upper = *C + 1;
6884       break;
6885     case SPF_SMAX:
6886       Lower = *C;
6887       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6888       break;
6889     default:
6890       break;
6891   }
6892 }
6893 
6894 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6895                                          AssumptionCache *AC,
6896                                          const Instruction *CtxI,
6897                                          unsigned Depth) {
6898   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6899 
6900   if (Depth == MaxAnalysisRecursionDepth)
6901     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6902 
6903   const APInt *C;
6904   if (match(V, m_APInt(C)))
6905     return ConstantRange(*C);
6906 
6907   InstrInfoQuery IIQ(UseInstrInfo);
6908   unsigned BitWidth = V->getType()->getScalarSizeInBits();
6909   APInt Lower = APInt(BitWidth, 0);
6910   APInt Upper = APInt(BitWidth, 0);
6911   if (auto *BO = dyn_cast<BinaryOperator>(V))
6912     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6913   else if (auto *II = dyn_cast<IntrinsicInst>(V))
6914     setLimitsForIntrinsic(*II, Lower, Upper);
6915   else if (auto *SI = dyn_cast<SelectInst>(V))
6916     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6917 
6918   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6919 
6920   if (auto *I = dyn_cast<Instruction>(V))
6921     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6922       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6923 
6924   if (CtxI && AC) {
6925     // Try to restrict the range based on information from assumptions.
6926     for (auto &AssumeVH : AC->assumptionsFor(V)) {
6927       if (!AssumeVH)
6928         continue;
6929       CallInst *I = cast<CallInst>(AssumeVH);
6930       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6931              "Got assumption for the wrong function!");
6932       assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6933              "must be an assume intrinsic");
6934 
6935       if (!isValidAssumeForContext(I, CtxI, nullptr))
6936         continue;
6937       Value *Arg = I->getArgOperand(0);
6938       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6939       // Currently we just use information from comparisons.
6940       if (!Cmp || Cmp->getOperand(0) != V)
6941         continue;
6942       ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6943                                                AC, I, Depth + 1);
6944       CR = CR.intersectWith(
6945           ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6946     }
6947   }
6948 
6949   return CR;
6950 }
6951 
6952 static Optional<int64_t>
6953 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6954   // Skip over the first indices.
6955   gep_type_iterator GTI = gep_type_begin(GEP);
6956   for (unsigned i = 1; i != Idx; ++i, ++GTI)
6957     /*skip along*/;
6958 
6959   // Compute the offset implied by the rest of the indices.
6960   int64_t Offset = 0;
6961   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
6962     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
6963     if (!OpC)
6964       return None;
6965     if (OpC->isZero())
6966       continue; // No offset.
6967 
6968     // Handle struct indices, which add their field offset to the pointer.
6969     if (StructType *STy = GTI.getStructTypeOrNull()) {
6970       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
6971       continue;
6972     }
6973 
6974     // Otherwise, we have a sequential type like an array or fixed-length
6975     // vector. Multiply the index by the ElementSize.
6976     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
6977     if (Size.isScalable())
6978       return None;
6979     Offset += Size.getFixedSize() * OpC->getSExtValue();
6980   }
6981 
6982   return Offset;
6983 }
6984 
6985 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
6986                                         const DataLayout &DL) {
6987   Ptr1 = Ptr1->stripPointerCasts();
6988   Ptr2 = Ptr2->stripPointerCasts();
6989 
6990   // Handle the trivial case first.
6991   if (Ptr1 == Ptr2) {
6992     return 0;
6993   }
6994 
6995   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
6996   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
6997 
6998   // If one pointer is a GEP see if the GEP is a constant offset from the base,
6999   // as in "P" and "gep P, 1".
7000   // Also do this iteratively to handle the the following case:
7001   //   Ptr_t1 = GEP Ptr1, c1
7002   //   Ptr_t2 = GEP Ptr_t1, c2
7003   //   Ptr2 = GEP Ptr_t2, c3
7004   // where we will return c1+c2+c3.
7005   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
7006   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
7007   // are the same, and return the difference between offsets.
7008   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
7009                                  const Value *Ptr) -> Optional<int64_t> {
7010     const GEPOperator *GEP_T = GEP;
7011     int64_t OffsetVal = 0;
7012     bool HasSameBase = false;
7013     while (GEP_T) {
7014       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
7015       if (!Offset)
7016         return None;
7017       OffsetVal += *Offset;
7018       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
7019       if (Op0 == Ptr) {
7020         HasSameBase = true;
7021         break;
7022       }
7023       GEP_T = dyn_cast<GEPOperator>(Op0);
7024     }
7025     if (!HasSameBase)
7026       return None;
7027     return OffsetVal;
7028   };
7029 
7030   if (GEP1) {
7031     auto Offset = getOffsetFromBase(GEP1, Ptr2);
7032     if (Offset)
7033       return -*Offset;
7034   }
7035   if (GEP2) {
7036     auto Offset = getOffsetFromBase(GEP2, Ptr1);
7037     if (Offset)
7038       return Offset;
7039   }
7040 
7041   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
7042   // base.  After that base, they may have some number of common (and
7043   // potentially variable) indices.  After that they handle some constant
7044   // offset, which determines their offset from each other.  At this point, we
7045   // handle no other case.
7046   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
7047     return None;
7048 
7049   // Skip any common indices and track the GEP types.
7050   unsigned Idx = 1;
7051   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7052     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7053       break;
7054 
7055   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
7056   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
7057   if (!Offset1 || !Offset2)
7058     return None;
7059   return *Offset2 - *Offset1;
7060 }
7061