1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/GuardUtils.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/Loads.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/IntrinsicsAArch64.h"
55 #include "llvm/IR/IntrinsicsX86.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/Operator.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/User.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Compiler.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/KnownBits.h"
69 #include "llvm/Support/MathExtras.h"
70 #include <algorithm>
71 #include <array>
72 #include <cassert>
73 #include <cstdint>
74 #include <iterator>
75 #include <utility>
76 
77 using namespace llvm;
78 using namespace llvm::PatternMatch;
79 
80 // Controls the number of uses of the value searched for possible
81 // dominating comparisons.
82 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83                                               cl::Hidden, cl::init(20));
84 
85 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
86 /// returns the element type's bitwidth.
87 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
88   if (unsigned BitWidth = Ty->getScalarSizeInBits())
89     return BitWidth;
90 
91   return DL.getPointerTypeSizeInBits(Ty);
92 }
93 
94 namespace {
95 
96 // Simplifying using an assume can only be done in a particular control-flow
97 // context (the context instruction provides that context). If an assume and
98 // the context instruction are not in the same block then the DT helps in
99 // figuring out if we can use it.
100 struct Query {
101   const DataLayout &DL;
102   AssumptionCache *AC;
103   const Instruction *CxtI;
104   const DominatorTree *DT;
105 
106   // Unlike the other analyses, this may be a nullptr because not all clients
107   // provide it currently.
108   OptimizationRemarkEmitter *ORE;
109 
110   /// Set of assumptions that should be excluded from further queries.
111   /// This is because of the potential for mutual recursion to cause
112   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
113   /// classic case of this is assume(x = y), which will attempt to determine
114   /// bits in x from bits in y, which will attempt to determine bits in y from
115   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
116   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
117   /// (all of which can call computeKnownBits), and so on.
118   std::array<const Value *, MaxAnalysisRecursionDepth> Excluded;
119 
120   /// If true, it is safe to use metadata during simplification.
121   InstrInfoQuery IIQ;
122 
123   unsigned NumExcluded = 0;
124 
125   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
126         const DominatorTree *DT, bool UseInstrInfo,
127         OptimizationRemarkEmitter *ORE = nullptr)
128       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
129 
130   Query(const Query &Q, const Value *NewExcl)
131       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
132         NumExcluded(Q.NumExcluded) {
133     Excluded = Q.Excluded;
134     Excluded[NumExcluded++] = NewExcl;
135     assert(NumExcluded <= Excluded.size());
136   }
137 
138   bool isExcluded(const Value *Value) const {
139     if (NumExcluded == 0)
140       return false;
141     auto End = Excluded.begin() + NumExcluded;
142     return std::find(Excluded.begin(), End, Value) != End;
143   }
144 };
145 
146 } // end anonymous namespace
147 
148 // Given the provided Value and, potentially, a context instruction, return
149 // the preferred context instruction (if any).
150 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
151   // If we've been provided with a context instruction, then use that (provided
152   // it has been inserted).
153   if (CxtI && CxtI->getParent())
154     return CxtI;
155 
156   // If the value is really an already-inserted instruction, then use that.
157   CxtI = dyn_cast<Instruction>(V);
158   if (CxtI && CxtI->getParent())
159     return CxtI;
160 
161   return nullptr;
162 }
163 
164 static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
165   // If we've been provided with a context instruction, then use that (provided
166   // it has been inserted).
167   if (CxtI && CxtI->getParent())
168     return CxtI;
169 
170   // If the value is really an already-inserted instruction, then use that.
171   CxtI = dyn_cast<Instruction>(V1);
172   if (CxtI && CxtI->getParent())
173     return CxtI;
174 
175   CxtI = dyn_cast<Instruction>(V2);
176   if (CxtI && CxtI->getParent())
177     return CxtI;
178 
179   return nullptr;
180 }
181 
182 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
183                                    const APInt &DemandedElts,
184                                    APInt &DemandedLHS, APInt &DemandedRHS) {
185   // The length of scalable vectors is unknown at compile time, thus we
186   // cannot check their values
187   if (isa<ScalableVectorType>(Shuf->getType()))
188     return false;
189 
190   int NumElts =
191       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
192   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
193   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
194   if (DemandedElts.isNullValue())
195     return true;
196   // Simple case of a shuffle with zeroinitializer.
197   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
198     DemandedLHS.setBit(0);
199     return true;
200   }
201   for (int i = 0; i != NumMaskElts; ++i) {
202     if (!DemandedElts[i])
203       continue;
204     int M = Shuf->getMaskValue(i);
205     assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
206 
207     // For undef elements, we don't know anything about the common state of
208     // the shuffle result.
209     if (M == -1)
210       return false;
211     if (M < NumElts)
212       DemandedLHS.setBit(M % NumElts);
213     else
214       DemandedRHS.setBit(M % NumElts);
215   }
216 
217   return true;
218 }
219 
220 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
221                              KnownBits &Known, unsigned Depth, const Query &Q);
222 
223 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
224                              const Query &Q) {
225   // FIXME: We currently have no way to represent the DemandedElts of a scalable
226   // vector
227   if (isa<ScalableVectorType>(V->getType())) {
228     Known.resetAll();
229     return;
230   }
231 
232   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
233   APInt DemandedElts =
234       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
235   computeKnownBits(V, DemandedElts, Known, Depth, Q);
236 }
237 
238 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
239                             const DataLayout &DL, unsigned Depth,
240                             AssumptionCache *AC, const Instruction *CxtI,
241                             const DominatorTree *DT,
242                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
243   ::computeKnownBits(V, Known, Depth,
244                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
245 }
246 
247 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
248                             KnownBits &Known, const DataLayout &DL,
249                             unsigned Depth, AssumptionCache *AC,
250                             const Instruction *CxtI, const DominatorTree *DT,
251                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
252   ::computeKnownBits(V, DemandedElts, Known, Depth,
253                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
254 }
255 
256 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
257                                   unsigned Depth, const Query &Q);
258 
259 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
260                                   const Query &Q);
261 
262 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
263                                  unsigned Depth, AssumptionCache *AC,
264                                  const Instruction *CxtI,
265                                  const DominatorTree *DT,
266                                  OptimizationRemarkEmitter *ORE,
267                                  bool UseInstrInfo) {
268   return ::computeKnownBits(
269       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
270 }
271 
272 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
273                                  const DataLayout &DL, unsigned Depth,
274                                  AssumptionCache *AC, const Instruction *CxtI,
275                                  const DominatorTree *DT,
276                                  OptimizationRemarkEmitter *ORE,
277                                  bool UseInstrInfo) {
278   return ::computeKnownBits(
279       V, DemandedElts, Depth,
280       Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
281 }
282 
283 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
284                                const DataLayout &DL, AssumptionCache *AC,
285                                const Instruction *CxtI, const DominatorTree *DT,
286                                bool UseInstrInfo) {
287   assert(LHS->getType() == RHS->getType() &&
288          "LHS and RHS should have the same type");
289   assert(LHS->getType()->isIntOrIntVectorTy() &&
290          "LHS and RHS should be integers");
291   // Look for an inverted mask: (X & ~M) op (Y & M).
292   Value *M;
293   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
294       match(RHS, m_c_And(m_Specific(M), m_Value())))
295     return true;
296   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
297       match(LHS, m_c_And(m_Specific(M), m_Value())))
298     return true;
299   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
300   KnownBits LHSKnown(IT->getBitWidth());
301   KnownBits RHSKnown(IT->getBitWidth());
302   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
303   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
304   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
305 }
306 
307 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
308   for (const User *U : CxtI->users()) {
309     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
310       if (IC->isEquality())
311         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
312           if (C->isNullValue())
313             continue;
314     return false;
315   }
316   return true;
317 }
318 
319 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
320                                    const Query &Q);
321 
322 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
323                                   bool OrZero, unsigned Depth,
324                                   AssumptionCache *AC, const Instruction *CxtI,
325                                   const DominatorTree *DT, bool UseInstrInfo) {
326   return ::isKnownToBeAPowerOfTwo(
327       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
328 }
329 
330 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
331                            unsigned Depth, const Query &Q);
332 
333 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
334 
335 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
336                           AssumptionCache *AC, const Instruction *CxtI,
337                           const DominatorTree *DT, bool UseInstrInfo) {
338   return ::isKnownNonZero(V, Depth,
339                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
340 }
341 
342 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
343                               unsigned Depth, AssumptionCache *AC,
344                               const Instruction *CxtI, const DominatorTree *DT,
345                               bool UseInstrInfo) {
346   KnownBits Known =
347       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
348   return Known.isNonNegative();
349 }
350 
351 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
352                            AssumptionCache *AC, const Instruction *CxtI,
353                            const DominatorTree *DT, bool UseInstrInfo) {
354   if (auto *CI = dyn_cast<ConstantInt>(V))
355     return CI->getValue().isStrictlyPositive();
356 
357   // TODO: We'd doing two recursive queries here.  We should factor this such
358   // that only a single query is needed.
359   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
360          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
361 }
362 
363 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
364                            AssumptionCache *AC, const Instruction *CxtI,
365                            const DominatorTree *DT, bool UseInstrInfo) {
366   KnownBits Known =
367       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
368   return Known.isNegative();
369 }
370 
371 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
372                             const Query &Q);
373 
374 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
375                            const DataLayout &DL, AssumptionCache *AC,
376                            const Instruction *CxtI, const DominatorTree *DT,
377                            bool UseInstrInfo) {
378   return ::isKnownNonEqual(V1, V2, 0,
379                            Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
380                                  UseInstrInfo, /*ORE=*/nullptr));
381 }
382 
383 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
384                               const Query &Q);
385 
386 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
387                              const DataLayout &DL, unsigned Depth,
388                              AssumptionCache *AC, const Instruction *CxtI,
389                              const DominatorTree *DT, bool UseInstrInfo) {
390   return ::MaskedValueIsZero(
391       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
392 }
393 
394 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
395                                    unsigned Depth, const Query &Q);
396 
397 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
398                                    const Query &Q) {
399   // FIXME: We currently have no way to represent the DemandedElts of a scalable
400   // vector
401   if (isa<ScalableVectorType>(V->getType()))
402     return 1;
403 
404   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
405   APInt DemandedElts =
406       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
407   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
408 }
409 
410 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
411                                   unsigned Depth, AssumptionCache *AC,
412                                   const Instruction *CxtI,
413                                   const DominatorTree *DT, bool UseInstrInfo) {
414   return ::ComputeNumSignBits(
415       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
416 }
417 
418 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
419                                    bool NSW, const APInt &DemandedElts,
420                                    KnownBits &KnownOut, KnownBits &Known2,
421                                    unsigned Depth, const Query &Q) {
422   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
423 
424   // If one operand is unknown and we have no nowrap information,
425   // the result will be unknown independently of the second operand.
426   if (KnownOut.isUnknown() && !NSW)
427     return;
428 
429   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
430   KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
431 }
432 
433 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
434                                 const APInt &DemandedElts, KnownBits &Known,
435                                 KnownBits &Known2, unsigned Depth,
436                                 const Query &Q) {
437   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
438   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
439 
440   bool isKnownNegative = false;
441   bool isKnownNonNegative = false;
442   // If the multiplication is known not to overflow, compute the sign bit.
443   if (NSW) {
444     if (Op0 == Op1) {
445       // The product of a number with itself is non-negative.
446       isKnownNonNegative = true;
447     } else {
448       bool isKnownNonNegativeOp1 = Known.isNonNegative();
449       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
450       bool isKnownNegativeOp1 = Known.isNegative();
451       bool isKnownNegativeOp0 = Known2.isNegative();
452       // The product of two numbers with the same sign is non-negative.
453       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
454                            (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
455       // The product of a negative number and a non-negative number is either
456       // negative or zero.
457       if (!isKnownNonNegative)
458         isKnownNegative =
459             (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
460              Known2.isNonZero()) ||
461             (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
462     }
463   }
464 
465   Known = KnownBits::computeForMul(Known, Known2);
466 
467   // Only make use of no-wrap flags if we failed to compute the sign bit
468   // directly.  This matters if the multiplication always overflows, in
469   // which case we prefer to follow the result of the direct computation,
470   // though as the program is invoking undefined behaviour we can choose
471   // whatever we like here.
472   if (isKnownNonNegative && !Known.isNegative())
473     Known.makeNonNegative();
474   else if (isKnownNegative && !Known.isNonNegative())
475     Known.makeNegative();
476 }
477 
478 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
479                                              KnownBits &Known) {
480   unsigned BitWidth = Known.getBitWidth();
481   unsigned NumRanges = Ranges.getNumOperands() / 2;
482   assert(NumRanges >= 1);
483 
484   Known.Zero.setAllBits();
485   Known.One.setAllBits();
486 
487   for (unsigned i = 0; i < NumRanges; ++i) {
488     ConstantInt *Lower =
489         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
490     ConstantInt *Upper =
491         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
492     ConstantRange Range(Lower->getValue(), Upper->getValue());
493 
494     // The first CommonPrefixBits of all values in Range are equal.
495     unsigned CommonPrefixBits =
496         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
497     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
498     APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
499     Known.One &= UnsignedMax & Mask;
500     Known.Zero &= ~UnsignedMax & Mask;
501   }
502 }
503 
504 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
505   SmallVector<const Value *, 16> WorkSet(1, I);
506   SmallPtrSet<const Value *, 32> Visited;
507   SmallPtrSet<const Value *, 16> EphValues;
508 
509   // The instruction defining an assumption's condition itself is always
510   // considered ephemeral to that assumption (even if it has other
511   // non-ephemeral users). See r246696's test case for an example.
512   if (is_contained(I->operands(), E))
513     return true;
514 
515   while (!WorkSet.empty()) {
516     const Value *V = WorkSet.pop_back_val();
517     if (!Visited.insert(V).second)
518       continue;
519 
520     // If all uses of this value are ephemeral, then so is this value.
521     if (llvm::all_of(V->users(), [&](const User *U) {
522                                    return EphValues.count(U);
523                                  })) {
524       if (V == E)
525         return true;
526 
527       if (V == I || isSafeToSpeculativelyExecute(V)) {
528        EphValues.insert(V);
529        if (const User *U = dyn_cast<User>(V))
530          append_range(WorkSet, U->operands());
531       }
532     }
533   }
534 
535   return false;
536 }
537 
538 // Is this an intrinsic that cannot be speculated but also cannot trap?
539 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
540   if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
541     return CI->isAssumeLikeIntrinsic();
542 
543   return false;
544 }
545 
546 bool llvm::isValidAssumeForContext(const Instruction *Inv,
547                                    const Instruction *CxtI,
548                                    const DominatorTree *DT) {
549   // There are two restrictions on the use of an assume:
550   //  1. The assume must dominate the context (or the control flow must
551   //     reach the assume whenever it reaches the context).
552   //  2. The context must not be in the assume's set of ephemeral values
553   //     (otherwise we will use the assume to prove that the condition
554   //     feeding the assume is trivially true, thus causing the removal of
555   //     the assume).
556 
557   if (Inv->getParent() == CxtI->getParent()) {
558     // If Inv and CtxI are in the same block, check if the assume (Inv) is first
559     // in the BB.
560     if (Inv->comesBefore(CxtI))
561       return true;
562 
563     // Don't let an assume affect itself - this would cause the problems
564     // `isEphemeralValueOf` is trying to prevent, and it would also make
565     // the loop below go out of bounds.
566     if (Inv == CxtI)
567       return false;
568 
569     // The context comes first, but they're both in the same block.
570     // Make sure there is nothing in between that might interrupt
571     // the control flow, not even CxtI itself.
572     // We limit the scan distance between the assume and its context instruction
573     // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
574     // it can be adjusted if needed (could be turned into a cl::opt).
575     unsigned ScanLimit = 15;
576     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
577       if (!isGuaranteedToTransferExecutionToSuccessor(&*I) || --ScanLimit == 0)
578         return false;
579 
580     return !isEphemeralValueOf(Inv, CxtI);
581   }
582 
583   // Inv and CxtI are in different blocks.
584   if (DT) {
585     if (DT->dominates(Inv, CxtI))
586       return true;
587   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
588     // We don't have a DT, but this trivially dominates.
589     return true;
590   }
591 
592   return false;
593 }
594 
595 static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
596   // v u> y implies v != 0.
597   if (Pred == ICmpInst::ICMP_UGT)
598     return true;
599 
600   // Special-case v != 0 to also handle v != null.
601   if (Pred == ICmpInst::ICMP_NE)
602     return match(RHS, m_Zero());
603 
604   // All other predicates - rely on generic ConstantRange handling.
605   const APInt *C;
606   if (!match(RHS, m_APInt(C)))
607     return false;
608 
609   ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
610   return !TrueValues.contains(APInt::getNullValue(C->getBitWidth()));
611 }
612 
613 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
614   // Use of assumptions is context-sensitive. If we don't have a context, we
615   // cannot use them!
616   if (!Q.AC || !Q.CxtI)
617     return false;
618 
619   if (Q.CxtI && V->getType()->isPointerTy()) {
620     SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
621     if (!NullPointerIsDefined(Q.CxtI->getFunction(),
622                               V->getType()->getPointerAddressSpace()))
623       AttrKinds.push_back(Attribute::Dereferenceable);
624 
625     if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
626       return true;
627   }
628 
629   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
630     if (!AssumeVH)
631       continue;
632     CallInst *I = cast<CallInst>(AssumeVH);
633     assert(I->getFunction() == Q.CxtI->getFunction() &&
634            "Got assumption for the wrong function!");
635     if (Q.isExcluded(I))
636       continue;
637 
638     // Warning: This loop can end up being somewhat performance sensitive.
639     // We're running this loop for once for each value queried resulting in a
640     // runtime of ~O(#assumes * #values).
641 
642     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
643            "must be an assume intrinsic");
644 
645     Value *RHS;
646     CmpInst::Predicate Pred;
647     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
648     if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
649       return false;
650 
651     if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
652       return true;
653   }
654 
655   return false;
656 }
657 
658 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
659                                        unsigned Depth, const Query &Q) {
660   // Use of assumptions is context-sensitive. If we don't have a context, we
661   // cannot use them!
662   if (!Q.AC || !Q.CxtI)
663     return;
664 
665   unsigned BitWidth = Known.getBitWidth();
666 
667   // Refine Known set if the pointer alignment is set by assume bundles.
668   if (V->getType()->isPointerTy()) {
669     if (RetainedKnowledge RK = getKnowledgeValidInContext(
670             V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
671       Known.Zero.setLowBits(Log2_32(RK.ArgValue));
672     }
673   }
674 
675   // Note that the patterns below need to be kept in sync with the code
676   // in AssumptionCache::updateAffectedValues.
677 
678   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
679     if (!AssumeVH)
680       continue;
681     CallInst *I = cast<CallInst>(AssumeVH);
682     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
683            "Got assumption for the wrong function!");
684     if (Q.isExcluded(I))
685       continue;
686 
687     // Warning: This loop can end up being somewhat performance sensitive.
688     // We're running this loop for once for each value queried resulting in a
689     // runtime of ~O(#assumes * #values).
690 
691     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
692            "must be an assume intrinsic");
693 
694     Value *Arg = I->getArgOperand(0);
695 
696     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
697       assert(BitWidth == 1 && "assume operand is not i1?");
698       Known.setAllOnes();
699       return;
700     }
701     if (match(Arg, m_Not(m_Specific(V))) &&
702         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
703       assert(BitWidth == 1 && "assume operand is not i1?");
704       Known.setAllZero();
705       return;
706     }
707 
708     // The remaining tests are all recursive, so bail out if we hit the limit.
709     if (Depth == MaxAnalysisRecursionDepth)
710       continue;
711 
712     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
713     if (!Cmp)
714       continue;
715 
716     // Note that ptrtoint may change the bitwidth.
717     Value *A, *B;
718     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
719 
720     CmpInst::Predicate Pred;
721     uint64_t C;
722     switch (Cmp->getPredicate()) {
723     default:
724       break;
725     case ICmpInst::ICMP_EQ:
726       // assume(v = a)
727       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
728           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
729         KnownBits RHSKnown =
730             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
731         Known.Zero |= RHSKnown.Zero;
732         Known.One  |= RHSKnown.One;
733       // assume(v & b = a)
734       } else if (match(Cmp,
735                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
736                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
737         KnownBits RHSKnown =
738             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
739         KnownBits MaskKnown =
740             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
741 
742         // For those bits in the mask that are known to be one, we can propagate
743         // known bits from the RHS to V.
744         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
745         Known.One  |= RHSKnown.One  & MaskKnown.One;
746       // assume(~(v & b) = a)
747       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
748                                      m_Value(A))) &&
749                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
750         KnownBits RHSKnown =
751             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
752         KnownBits MaskKnown =
753             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
754 
755         // For those bits in the mask that are known to be one, we can propagate
756         // inverted known bits from the RHS to V.
757         Known.Zero |= RHSKnown.One  & MaskKnown.One;
758         Known.One  |= RHSKnown.Zero & MaskKnown.One;
759       // assume(v | b = a)
760       } else if (match(Cmp,
761                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
762                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
763         KnownBits RHSKnown =
764             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
765         KnownBits BKnown =
766             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
767 
768         // For those bits in B that are known to be zero, we can propagate known
769         // bits from the RHS to V.
770         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
771         Known.One  |= RHSKnown.One  & BKnown.Zero;
772       // assume(~(v | b) = a)
773       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
774                                      m_Value(A))) &&
775                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
776         KnownBits RHSKnown =
777             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
778         KnownBits BKnown =
779             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
780 
781         // For those bits in B that are known to be zero, we can propagate
782         // inverted known bits from the RHS to V.
783         Known.Zero |= RHSKnown.One  & BKnown.Zero;
784         Known.One  |= RHSKnown.Zero & BKnown.Zero;
785       // assume(v ^ b = a)
786       } else if (match(Cmp,
787                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
788                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
789         KnownBits RHSKnown =
790             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
791         KnownBits BKnown =
792             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
793 
794         // For those bits in B that are known to be zero, we can propagate known
795         // bits from the RHS to V. For those bits in B that are known to be one,
796         // we can propagate inverted known bits from the RHS to V.
797         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
798         Known.One  |= RHSKnown.One  & BKnown.Zero;
799         Known.Zero |= RHSKnown.One  & BKnown.One;
800         Known.One  |= RHSKnown.Zero & BKnown.One;
801       // assume(~(v ^ b) = a)
802       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
803                                      m_Value(A))) &&
804                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
805         KnownBits RHSKnown =
806             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
807         KnownBits BKnown =
808             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
809 
810         // For those bits in B that are known to be zero, we can propagate
811         // inverted known bits from the RHS to V. For those bits in B that are
812         // known to be one, we can propagate known bits from the RHS to V.
813         Known.Zero |= RHSKnown.One  & BKnown.Zero;
814         Known.One  |= RHSKnown.Zero & BKnown.Zero;
815         Known.Zero |= RHSKnown.Zero & BKnown.One;
816         Known.One  |= RHSKnown.One  & BKnown.One;
817       // assume(v << c = a)
818       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
819                                      m_Value(A))) &&
820                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
821         KnownBits RHSKnown =
822             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
823 
824         // For those bits in RHS that are known, we can propagate them to known
825         // bits in V shifted to the right by C.
826         RHSKnown.Zero.lshrInPlace(C);
827         Known.Zero |= RHSKnown.Zero;
828         RHSKnown.One.lshrInPlace(C);
829         Known.One  |= RHSKnown.One;
830       // assume(~(v << c) = a)
831       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
832                                      m_Value(A))) &&
833                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
834         KnownBits RHSKnown =
835             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
836         // For those bits in RHS that are known, we can propagate them inverted
837         // to known bits in V shifted to the right by C.
838         RHSKnown.One.lshrInPlace(C);
839         Known.Zero |= RHSKnown.One;
840         RHSKnown.Zero.lshrInPlace(C);
841         Known.One  |= RHSKnown.Zero;
842       // assume(v >> c = a)
843       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
844                                      m_Value(A))) &&
845                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
846         KnownBits RHSKnown =
847             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
848         // For those bits in RHS that are known, we can propagate them to known
849         // bits in V shifted to the right by C.
850         Known.Zero |= RHSKnown.Zero << C;
851         Known.One  |= RHSKnown.One  << C;
852       // assume(~(v >> c) = a)
853       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
854                                      m_Value(A))) &&
855                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
856         KnownBits RHSKnown =
857             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
858         // For those bits in RHS that are known, we can propagate them inverted
859         // to known bits in V shifted to the right by C.
860         Known.Zero |= RHSKnown.One  << C;
861         Known.One  |= RHSKnown.Zero << C;
862       }
863       break;
864     case ICmpInst::ICMP_SGE:
865       // assume(v >=_s c) where c is non-negative
866       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
867           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
868         KnownBits RHSKnown =
869             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
870 
871         if (RHSKnown.isNonNegative()) {
872           // We know that the sign bit is zero.
873           Known.makeNonNegative();
874         }
875       }
876       break;
877     case ICmpInst::ICMP_SGT:
878       // assume(v >_s c) where c is at least -1.
879       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
880           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
881         KnownBits RHSKnown =
882             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
883 
884         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
885           // We know that the sign bit is zero.
886           Known.makeNonNegative();
887         }
888       }
889       break;
890     case ICmpInst::ICMP_SLE:
891       // assume(v <=_s c) where c is negative
892       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
893           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
894         KnownBits RHSKnown =
895             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
896 
897         if (RHSKnown.isNegative()) {
898           // We know that the sign bit is one.
899           Known.makeNegative();
900         }
901       }
902       break;
903     case ICmpInst::ICMP_SLT:
904       // assume(v <_s c) where c is non-positive
905       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
906           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
907         KnownBits RHSKnown =
908             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
909 
910         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
911           // We know that the sign bit is one.
912           Known.makeNegative();
913         }
914       }
915       break;
916     case ICmpInst::ICMP_ULE:
917       // assume(v <=_u c)
918       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
919           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
920         KnownBits RHSKnown =
921             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
922 
923         // Whatever high bits in c are zero are known to be zero.
924         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
925       }
926       break;
927     case ICmpInst::ICMP_ULT:
928       // assume(v <_u c)
929       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
930           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
931         KnownBits RHSKnown =
932             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
933 
934         // If the RHS is known zero, then this assumption must be wrong (nothing
935         // is unsigned less than zero). Signal a conflict and get out of here.
936         if (RHSKnown.isZero()) {
937           Known.Zero.setAllBits();
938           Known.One.setAllBits();
939           break;
940         }
941 
942         // Whatever high bits in c are zero are known to be zero (if c is a power
943         // of 2, then one more).
944         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
945           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
946         else
947           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
948       }
949       break;
950     }
951   }
952 
953   // If assumptions conflict with each other or previous known bits, then we
954   // have a logical fallacy. It's possible that the assumption is not reachable,
955   // so this isn't a real bug. On the other hand, the program may have undefined
956   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
957   // clear out the known bits, try to warn the user, and hope for the best.
958   if (Known.Zero.intersects(Known.One)) {
959     Known.resetAll();
960 
961     if (Q.ORE)
962       Q.ORE->emit([&]() {
963         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
964         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
965                                           CxtI)
966                << "Detected conflicting code assumptions. Program may "
967                   "have undefined behavior, or compiler may have "
968                   "internal error.";
969       });
970   }
971 }
972 
973 /// Compute known bits from a shift operator, including those with a
974 /// non-constant shift amount. Known is the output of this function. Known2 is a
975 /// pre-allocated temporary with the same bit width as Known and on return
976 /// contains the known bit of the shift value source. KF is an
977 /// operator-specific function that, given the known-bits and a shift amount,
978 /// compute the implied known-bits of the shift operator's result respectively
979 /// for that shift amount. The results from calling KF are conservatively
980 /// combined for all permitted shift amounts.
981 static void computeKnownBitsFromShiftOperator(
982     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
983     KnownBits &Known2, unsigned Depth, const Query &Q,
984     function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
985   unsigned BitWidth = Known.getBitWidth();
986   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
987   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
988 
989   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
990   // BitWidth > 64 and any upper bits are known, we'll end up returning the
991   // limit value (which implies all bits are known).
992   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
993   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
994   bool ShiftAmtIsConstant = Known.isConstant();
995   bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
996 
997   if (ShiftAmtIsConstant) {
998     Known = KF(Known2, Known);
999 
1000     // If the known bits conflict, this must be an overflowing left shift, so
1001     // the shift result is poison. We can return anything we want. Choose 0 for
1002     // the best folding opportunity.
1003     if (Known.hasConflict())
1004       Known.setAllZero();
1005 
1006     return;
1007   }
1008 
1009   // If the shift amount could be greater than or equal to the bit-width of the
1010   // LHS, the value could be poison, but bail out because the check below is
1011   // expensive.
1012   // TODO: Should we just carry on?
1013   if (MaxShiftAmtIsOutOfRange) {
1014     Known.resetAll();
1015     return;
1016   }
1017 
1018   // It would be more-clearly correct to use the two temporaries for this
1019   // calculation. Reusing the APInts here to prevent unnecessary allocations.
1020   Known.resetAll();
1021 
1022   // If we know the shifter operand is nonzero, we can sometimes infer more
1023   // known bits. However this is expensive to compute, so be lazy about it and
1024   // only compute it when absolutely necessary.
1025   Optional<bool> ShifterOperandIsNonZero;
1026 
1027   // Early exit if we can't constrain any well-defined shift amount.
1028   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1029       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1030     ShifterOperandIsNonZero =
1031         isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1032     if (!*ShifterOperandIsNonZero)
1033       return;
1034   }
1035 
1036   Known.Zero.setAllBits();
1037   Known.One.setAllBits();
1038   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1039     // Combine the shifted known input bits only for those shift amounts
1040     // compatible with its known constraints.
1041     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1042       continue;
1043     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1044       continue;
1045     // If we know the shifter is nonzero, we may be able to infer more known
1046     // bits. This check is sunk down as far as possible to avoid the expensive
1047     // call to isKnownNonZero if the cheaper checks above fail.
1048     if (ShiftAmt == 0) {
1049       if (!ShifterOperandIsNonZero.hasValue())
1050         ShifterOperandIsNonZero =
1051             isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1052       if (*ShifterOperandIsNonZero)
1053         continue;
1054     }
1055 
1056     Known = KnownBits::commonBits(
1057         Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1058   }
1059 
1060   // If the known bits conflict, the result is poison. Return a 0 and hope the
1061   // caller can further optimize that.
1062   if (Known.hasConflict())
1063     Known.setAllZero();
1064 }
1065 
1066 static void computeKnownBitsFromOperator(const Operator *I,
1067                                          const APInt &DemandedElts,
1068                                          KnownBits &Known, unsigned Depth,
1069                                          const Query &Q) {
1070   unsigned BitWidth = Known.getBitWidth();
1071 
1072   KnownBits Known2(BitWidth);
1073   switch (I->getOpcode()) {
1074   default: break;
1075   case Instruction::Load:
1076     if (MDNode *MD =
1077             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1078       computeKnownBitsFromRangeMetadata(*MD, Known);
1079     break;
1080   case Instruction::And: {
1081     // If either the LHS or the RHS are Zero, the result is zero.
1082     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1083     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1084 
1085     Known &= Known2;
1086 
1087     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1088     // here we handle the more general case of adding any odd number by
1089     // matching the form add(x, add(x, y)) where y is odd.
1090     // TODO: This could be generalized to clearing any bit set in y where the
1091     // following bit is known to be unset in y.
1092     Value *X = nullptr, *Y = nullptr;
1093     if (!Known.Zero[0] && !Known.One[0] &&
1094         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1095       Known2.resetAll();
1096       computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1097       if (Known2.countMinTrailingOnes() > 0)
1098         Known.Zero.setBit(0);
1099     }
1100     break;
1101   }
1102   case Instruction::Or:
1103     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1104     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1105 
1106     Known |= Known2;
1107     break;
1108   case Instruction::Xor:
1109     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1110     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1111 
1112     Known ^= Known2;
1113     break;
1114   case Instruction::Mul: {
1115     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1116     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1117                         Known, Known2, Depth, Q);
1118     break;
1119   }
1120   case Instruction::UDiv: {
1121     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1122     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1123     Known = KnownBits::udiv(Known, Known2);
1124     break;
1125   }
1126   case Instruction::Select: {
1127     const Value *LHS = nullptr, *RHS = nullptr;
1128     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1129     if (SelectPatternResult::isMinOrMax(SPF)) {
1130       computeKnownBits(RHS, Known, Depth + 1, Q);
1131       computeKnownBits(LHS, Known2, Depth + 1, Q);
1132       switch (SPF) {
1133       default:
1134         llvm_unreachable("Unhandled select pattern flavor!");
1135       case SPF_SMAX:
1136         Known = KnownBits::smax(Known, Known2);
1137         break;
1138       case SPF_SMIN:
1139         Known = KnownBits::smin(Known, Known2);
1140         break;
1141       case SPF_UMAX:
1142         Known = KnownBits::umax(Known, Known2);
1143         break;
1144       case SPF_UMIN:
1145         Known = KnownBits::umin(Known, Known2);
1146         break;
1147       }
1148       break;
1149     }
1150 
1151     computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1152     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1153 
1154     // Only known if known in both the LHS and RHS.
1155     Known = KnownBits::commonBits(Known, Known2);
1156 
1157     if (SPF == SPF_ABS) {
1158       // RHS from matchSelectPattern returns the negation part of abs pattern.
1159       // If the negate has an NSW flag we can assume the sign bit of the result
1160       // will be 0 because that makes abs(INT_MIN) undefined.
1161       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1162           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1163         Known.Zero.setSignBit();
1164     }
1165 
1166     break;
1167   }
1168   case Instruction::FPTrunc:
1169   case Instruction::FPExt:
1170   case Instruction::FPToUI:
1171   case Instruction::FPToSI:
1172   case Instruction::SIToFP:
1173   case Instruction::UIToFP:
1174     break; // Can't work with floating point.
1175   case Instruction::PtrToInt:
1176   case Instruction::IntToPtr:
1177     // Fall through and handle them the same as zext/trunc.
1178     LLVM_FALLTHROUGH;
1179   case Instruction::ZExt:
1180   case Instruction::Trunc: {
1181     Type *SrcTy = I->getOperand(0)->getType();
1182 
1183     unsigned SrcBitWidth;
1184     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1185     // which fall through here.
1186     Type *ScalarTy = SrcTy->getScalarType();
1187     SrcBitWidth = ScalarTy->isPointerTy() ?
1188       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1189       Q.DL.getTypeSizeInBits(ScalarTy);
1190 
1191     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1192     Known = Known.anyextOrTrunc(SrcBitWidth);
1193     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1194     Known = Known.zextOrTrunc(BitWidth);
1195     break;
1196   }
1197   case Instruction::BitCast: {
1198     Type *SrcTy = I->getOperand(0)->getType();
1199     if (SrcTy->isIntOrPtrTy() &&
1200         // TODO: For now, not handling conversions like:
1201         // (bitcast i64 %x to <2 x i32>)
1202         !I->getType()->isVectorTy()) {
1203       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1204       break;
1205     }
1206     break;
1207   }
1208   case Instruction::SExt: {
1209     // Compute the bits in the result that are not present in the input.
1210     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1211 
1212     Known = Known.trunc(SrcBitWidth);
1213     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1214     // If the sign bit of the input is known set or clear, then we know the
1215     // top bits of the result.
1216     Known = Known.sext(BitWidth);
1217     break;
1218   }
1219   case Instruction::Shl: {
1220     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1221     auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1222       KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1223       // If this shift has "nsw" keyword, then the result is either a poison
1224       // value or has the same sign bit as the first operand.
1225       if (NSW) {
1226         if (KnownVal.Zero.isSignBitSet())
1227           Result.Zero.setSignBit();
1228         if (KnownVal.One.isSignBitSet())
1229           Result.One.setSignBit();
1230       }
1231       return Result;
1232     };
1233     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1234                                       KF);
1235     // Trailing zeros of a right-shifted constant never decrease.
1236     const APInt *C;
1237     if (match(I->getOperand(0), m_APInt(C)))
1238       Known.Zero.setLowBits(C->countTrailingZeros());
1239     break;
1240   }
1241   case Instruction::LShr: {
1242     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1243       return KnownBits::lshr(KnownVal, KnownAmt);
1244     };
1245     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1246                                       KF);
1247     // Leading zeros of a left-shifted constant never decrease.
1248     const APInt *C;
1249     if (match(I->getOperand(0), m_APInt(C)))
1250       Known.Zero.setHighBits(C->countLeadingZeros());
1251     break;
1252   }
1253   case Instruction::AShr: {
1254     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1255       return KnownBits::ashr(KnownVal, KnownAmt);
1256     };
1257     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1258                                       KF);
1259     break;
1260   }
1261   case Instruction::Sub: {
1262     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1263     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1264                            DemandedElts, Known, Known2, Depth, Q);
1265     break;
1266   }
1267   case Instruction::Add: {
1268     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1269     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1270                            DemandedElts, Known, Known2, Depth, Q);
1271     break;
1272   }
1273   case Instruction::SRem:
1274     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1275     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1276     Known = KnownBits::srem(Known, Known2);
1277     break;
1278 
1279   case Instruction::URem:
1280     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1281     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1282     Known = KnownBits::urem(Known, Known2);
1283     break;
1284   case Instruction::Alloca:
1285     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1286     break;
1287   case Instruction::GetElementPtr: {
1288     // Analyze all of the subscripts of this getelementptr instruction
1289     // to determine if we can prove known low zero bits.
1290     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1291     // Accumulate the constant indices in a separate variable
1292     // to minimize the number of calls to computeForAddSub.
1293     APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1294 
1295     gep_type_iterator GTI = gep_type_begin(I);
1296     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1297       // TrailZ can only become smaller, short-circuit if we hit zero.
1298       if (Known.isUnknown())
1299         break;
1300 
1301       Value *Index = I->getOperand(i);
1302 
1303       // Handle case when index is zero.
1304       Constant *CIndex = dyn_cast<Constant>(Index);
1305       if (CIndex && CIndex->isZeroValue())
1306         continue;
1307 
1308       if (StructType *STy = GTI.getStructTypeOrNull()) {
1309         // Handle struct member offset arithmetic.
1310 
1311         assert(CIndex &&
1312                "Access to structure field must be known at compile time");
1313 
1314         if (CIndex->getType()->isVectorTy())
1315           Index = CIndex->getSplatValue();
1316 
1317         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1318         const StructLayout *SL = Q.DL.getStructLayout(STy);
1319         uint64_t Offset = SL->getElementOffset(Idx);
1320         AccConstIndices += Offset;
1321         continue;
1322       }
1323 
1324       // Handle array index arithmetic.
1325       Type *IndexedTy = GTI.getIndexedType();
1326       if (!IndexedTy->isSized()) {
1327         Known.resetAll();
1328         break;
1329       }
1330 
1331       unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1332       KnownBits IndexBits(IndexBitWidth);
1333       computeKnownBits(Index, IndexBits, Depth + 1, Q);
1334       TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1335       uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1336       KnownBits ScalingFactor(IndexBitWidth);
1337       // Multiply by current sizeof type.
1338       // &A[i] == A + i * sizeof(*A[i]).
1339       if (IndexTypeSize.isScalable()) {
1340         // For scalable types the only thing we know about sizeof is
1341         // that this is a multiple of the minimum size.
1342         ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1343       } else if (IndexBits.isConstant()) {
1344         APInt IndexConst = IndexBits.getConstant();
1345         APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1346         IndexConst *= ScalingFactor;
1347         AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1348         continue;
1349       } else {
1350         ScalingFactor =
1351             KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1352       }
1353       IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor);
1354 
1355       // If the offsets have a different width from the pointer, according
1356       // to the language reference we need to sign-extend or truncate them
1357       // to the width of the pointer.
1358       IndexBits = IndexBits.sextOrTrunc(BitWidth);
1359 
1360       // Note that inbounds does *not* guarantee nsw for the addition, as only
1361       // the offset is signed, while the base address is unsigned.
1362       Known = KnownBits::computeForAddSub(
1363           /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1364     }
1365     if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
1366       KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1367       Known = KnownBits::computeForAddSub(
1368           /*Add=*/true, /*NSW=*/false, Known, Index);
1369     }
1370     break;
1371   }
1372   case Instruction::PHI: {
1373     const PHINode *P = cast<PHINode>(I);
1374     BinaryOperator *BO = nullptr;
1375     Value *R = nullptr, *L = nullptr;
1376     if (matchSimpleRecurrence(P, BO, R, L)) {
1377       // Handle the case of a simple two-predecessor recurrence PHI.
1378       // There's a lot more that could theoretically be done here, but
1379       // this is sufficient to catch some interesting cases.
1380       unsigned Opcode = BO->getOpcode();
1381 
1382       // If this is a shift recurrence, we know the bits being shifted in.
1383       // We can combine that with information about the start value of the
1384       // recurrence to conclude facts about the result.
1385       if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1386            Opcode == Instruction::Shl) &&
1387           BO->getOperand(0) == I) {
1388 
1389         // We have matched a recurrence of the form:
1390         // %iv = [R, %entry], [%iv.next, %backedge]
1391         // %iv.next = shift_op %iv, L
1392 
1393         // Recurse with the phi context to avoid concern about whether facts
1394         // inferred hold at original context instruction.  TODO: It may be
1395         // correct to use the original context.  IF warranted, explore and
1396         // add sufficient tests to cover.
1397         Query RecQ = Q;
1398         RecQ.CxtI = P;
1399         computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1400         switch (Opcode) {
1401         case Instruction::Shl:
1402           // A shl recurrence will only increase the tailing zeros
1403           Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1404           break;
1405         case Instruction::LShr:
1406           // A lshr recurrence will preserve the leading zeros of the
1407           // start value
1408           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1409           break;
1410         case Instruction::AShr:
1411           // An ashr recurrence will extend the initial sign bit
1412           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1413           Known.One.setHighBits(Known2.countMinLeadingOnes());
1414           break;
1415         };
1416       }
1417 
1418       // Check for operations that have the property that if
1419       // both their operands have low zero bits, the result
1420       // will have low zero bits.
1421       if (Opcode == Instruction::Add ||
1422           Opcode == Instruction::Sub ||
1423           Opcode == Instruction::And ||
1424           Opcode == Instruction::Or ||
1425           Opcode == Instruction::Mul) {
1426         // Change the context instruction to the "edge" that flows into the
1427         // phi. This is important because that is where the value is actually
1428         // "evaluated" even though it is used later somewhere else. (see also
1429         // D69571).
1430         Query RecQ = Q;
1431 
1432         unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1433         Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1434         Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1435 
1436         // Ok, we have a PHI of the form L op= R. Check for low
1437         // zero bits.
1438         RecQ.CxtI = RInst;
1439         computeKnownBits(R, Known2, Depth + 1, RecQ);
1440 
1441         // We need to take the minimum number of known bits
1442         KnownBits Known3(BitWidth);
1443         RecQ.CxtI = LInst;
1444         computeKnownBits(L, Known3, Depth + 1, RecQ);
1445 
1446         Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1447                                        Known3.countMinTrailingZeros()));
1448 
1449         auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1450         if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1451           // If initial value of recurrence is nonnegative, and we are adding
1452           // a nonnegative number with nsw, the result can only be nonnegative
1453           // or poison value regardless of the number of times we execute the
1454           // add in phi recurrence. If initial value is negative and we are
1455           // adding a negative number with nsw, the result can only be
1456           // negative or poison value. Similar arguments apply to sub and mul.
1457           //
1458           // (add non-negative, non-negative) --> non-negative
1459           // (add negative, negative) --> negative
1460           if (Opcode == Instruction::Add) {
1461             if (Known2.isNonNegative() && Known3.isNonNegative())
1462               Known.makeNonNegative();
1463             else if (Known2.isNegative() && Known3.isNegative())
1464               Known.makeNegative();
1465           }
1466 
1467           // (sub nsw non-negative, negative) --> non-negative
1468           // (sub nsw negative, non-negative) --> negative
1469           else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1470             if (Known2.isNonNegative() && Known3.isNegative())
1471               Known.makeNonNegative();
1472             else if (Known2.isNegative() && Known3.isNonNegative())
1473               Known.makeNegative();
1474           }
1475 
1476           // (mul nsw non-negative, non-negative) --> non-negative
1477           else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1478                    Known3.isNonNegative())
1479             Known.makeNonNegative();
1480         }
1481 
1482         break;
1483       }
1484     }
1485 
1486     // Unreachable blocks may have zero-operand PHI nodes.
1487     if (P->getNumIncomingValues() == 0)
1488       break;
1489 
1490     // Otherwise take the unions of the known bit sets of the operands,
1491     // taking conservative care to avoid excessive recursion.
1492     if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1493       // Skip if every incoming value references to ourself.
1494       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1495         break;
1496 
1497       Known.Zero.setAllBits();
1498       Known.One.setAllBits();
1499       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1500         Value *IncValue = P->getIncomingValue(u);
1501         // Skip direct self references.
1502         if (IncValue == P) continue;
1503 
1504         // Change the context instruction to the "edge" that flows into the
1505         // phi. This is important because that is where the value is actually
1506         // "evaluated" even though it is used later somewhere else. (see also
1507         // D69571).
1508         Query RecQ = Q;
1509         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1510 
1511         Known2 = KnownBits(BitWidth);
1512         // Recurse, but cap the recursion to one level, because we don't
1513         // want to waste time spinning around in loops.
1514         computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1515         Known = KnownBits::commonBits(Known, Known2);
1516         // If all bits have been ruled out, there's no need to check
1517         // more operands.
1518         if (Known.isUnknown())
1519           break;
1520       }
1521     }
1522     break;
1523   }
1524   case Instruction::Call:
1525   case Instruction::Invoke:
1526     // If range metadata is attached to this call, set known bits from that,
1527     // and then intersect with known bits based on other properties of the
1528     // function.
1529     if (MDNode *MD =
1530             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1531       computeKnownBitsFromRangeMetadata(*MD, Known);
1532     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1533       computeKnownBits(RV, Known2, Depth + 1, Q);
1534       Known.Zero |= Known2.Zero;
1535       Known.One |= Known2.One;
1536     }
1537     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1538       switch (II->getIntrinsicID()) {
1539       default: break;
1540       case Intrinsic::abs: {
1541         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1542         bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1543         Known = Known2.abs(IntMinIsPoison);
1544         break;
1545       }
1546       case Intrinsic::bitreverse:
1547         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1548         Known.Zero |= Known2.Zero.reverseBits();
1549         Known.One |= Known2.One.reverseBits();
1550         break;
1551       case Intrinsic::bswap:
1552         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1553         Known.Zero |= Known2.Zero.byteSwap();
1554         Known.One |= Known2.One.byteSwap();
1555         break;
1556       case Intrinsic::ctlz: {
1557         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1558         // If we have a known 1, its position is our upper bound.
1559         unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1560         // If this call is undefined for 0, the result will be less than 2^n.
1561         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1562           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1563         unsigned LowBits = Log2_32(PossibleLZ)+1;
1564         Known.Zero.setBitsFrom(LowBits);
1565         break;
1566       }
1567       case Intrinsic::cttz: {
1568         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1569         // If we have a known 1, its position is our upper bound.
1570         unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1571         // If this call is undefined for 0, the result will be less than 2^n.
1572         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1573           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1574         unsigned LowBits = Log2_32(PossibleTZ)+1;
1575         Known.Zero.setBitsFrom(LowBits);
1576         break;
1577       }
1578       case Intrinsic::ctpop: {
1579         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1580         // We can bound the space the count needs.  Also, bits known to be zero
1581         // can't contribute to the population.
1582         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1583         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1584         Known.Zero.setBitsFrom(LowBits);
1585         // TODO: we could bound KnownOne using the lower bound on the number
1586         // of bits which might be set provided by popcnt KnownOne2.
1587         break;
1588       }
1589       case Intrinsic::fshr:
1590       case Intrinsic::fshl: {
1591         const APInt *SA;
1592         if (!match(I->getOperand(2), m_APInt(SA)))
1593           break;
1594 
1595         // Normalize to funnel shift left.
1596         uint64_t ShiftAmt = SA->urem(BitWidth);
1597         if (II->getIntrinsicID() == Intrinsic::fshr)
1598           ShiftAmt = BitWidth - ShiftAmt;
1599 
1600         KnownBits Known3(BitWidth);
1601         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1602         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1603 
1604         Known.Zero =
1605             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1606         Known.One =
1607             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1608         break;
1609       }
1610       case Intrinsic::uadd_sat:
1611       case Intrinsic::usub_sat: {
1612         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1613         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1614         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1615 
1616         // Add: Leading ones of either operand are preserved.
1617         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1618         // as leading zeros in the result.
1619         unsigned LeadingKnown;
1620         if (IsAdd)
1621           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1622                                   Known2.countMinLeadingOnes());
1623         else
1624           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1625                                   Known2.countMinLeadingOnes());
1626 
1627         Known = KnownBits::computeForAddSub(
1628             IsAdd, /* NSW */ false, Known, Known2);
1629 
1630         // We select between the operation result and all-ones/zero
1631         // respectively, so we can preserve known ones/zeros.
1632         if (IsAdd) {
1633           Known.One.setHighBits(LeadingKnown);
1634           Known.Zero.clearAllBits();
1635         } else {
1636           Known.Zero.setHighBits(LeadingKnown);
1637           Known.One.clearAllBits();
1638         }
1639         break;
1640       }
1641       case Intrinsic::umin:
1642         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1643         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1644         Known = KnownBits::umin(Known, Known2);
1645         break;
1646       case Intrinsic::umax:
1647         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1648         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1649         Known = KnownBits::umax(Known, Known2);
1650         break;
1651       case Intrinsic::smin:
1652         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1653         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1654         Known = KnownBits::smin(Known, Known2);
1655         break;
1656       case Intrinsic::smax:
1657         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1658         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1659         Known = KnownBits::smax(Known, Known2);
1660         break;
1661       case Intrinsic::x86_sse42_crc32_64_64:
1662         Known.Zero.setBitsFrom(32);
1663         break;
1664       }
1665     }
1666     break;
1667   case Instruction::ShuffleVector: {
1668     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1669     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1670     if (!Shuf) {
1671       Known.resetAll();
1672       return;
1673     }
1674     // For undef elements, we don't know anything about the common state of
1675     // the shuffle result.
1676     APInt DemandedLHS, DemandedRHS;
1677     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1678       Known.resetAll();
1679       return;
1680     }
1681     Known.One.setAllBits();
1682     Known.Zero.setAllBits();
1683     if (!!DemandedLHS) {
1684       const Value *LHS = Shuf->getOperand(0);
1685       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1686       // If we don't know any bits, early out.
1687       if (Known.isUnknown())
1688         break;
1689     }
1690     if (!!DemandedRHS) {
1691       const Value *RHS = Shuf->getOperand(1);
1692       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1693       Known = KnownBits::commonBits(Known, Known2);
1694     }
1695     break;
1696   }
1697   case Instruction::InsertElement: {
1698     const Value *Vec = I->getOperand(0);
1699     const Value *Elt = I->getOperand(1);
1700     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1701     // Early out if the index is non-constant or out-of-range.
1702     unsigned NumElts = DemandedElts.getBitWidth();
1703     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1704       Known.resetAll();
1705       return;
1706     }
1707     Known.One.setAllBits();
1708     Known.Zero.setAllBits();
1709     unsigned EltIdx = CIdx->getZExtValue();
1710     // Do we demand the inserted element?
1711     if (DemandedElts[EltIdx]) {
1712       computeKnownBits(Elt, Known, Depth + 1, Q);
1713       // If we don't know any bits, early out.
1714       if (Known.isUnknown())
1715         break;
1716     }
1717     // We don't need the base vector element that has been inserted.
1718     APInt DemandedVecElts = DemandedElts;
1719     DemandedVecElts.clearBit(EltIdx);
1720     if (!!DemandedVecElts) {
1721       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1722       Known = KnownBits::commonBits(Known, Known2);
1723     }
1724     break;
1725   }
1726   case Instruction::ExtractElement: {
1727     // Look through extract element. If the index is non-constant or
1728     // out-of-range demand all elements, otherwise just the extracted element.
1729     const Value *Vec = I->getOperand(0);
1730     const Value *Idx = I->getOperand(1);
1731     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1732     if (isa<ScalableVectorType>(Vec->getType())) {
1733       // FIXME: there's probably *something* we can do with scalable vectors
1734       Known.resetAll();
1735       break;
1736     }
1737     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1738     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1739     if (CIdx && CIdx->getValue().ult(NumElts))
1740       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1741     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1742     break;
1743   }
1744   case Instruction::ExtractValue:
1745     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1746       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1747       if (EVI->getNumIndices() != 1) break;
1748       if (EVI->getIndices()[0] == 0) {
1749         switch (II->getIntrinsicID()) {
1750         default: break;
1751         case Intrinsic::uadd_with_overflow:
1752         case Intrinsic::sadd_with_overflow:
1753           computeKnownBitsAddSub(true, II->getArgOperand(0),
1754                                  II->getArgOperand(1), false, DemandedElts,
1755                                  Known, Known2, Depth, Q);
1756           break;
1757         case Intrinsic::usub_with_overflow:
1758         case Intrinsic::ssub_with_overflow:
1759           computeKnownBitsAddSub(false, II->getArgOperand(0),
1760                                  II->getArgOperand(1), false, DemandedElts,
1761                                  Known, Known2, Depth, Q);
1762           break;
1763         case Intrinsic::umul_with_overflow:
1764         case Intrinsic::smul_with_overflow:
1765           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1766                               DemandedElts, Known, Known2, Depth, Q);
1767           break;
1768         }
1769       }
1770     }
1771     break;
1772   case Instruction::Freeze:
1773     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1774                                   Depth + 1))
1775       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1776     break;
1777   }
1778 }
1779 
1780 /// Determine which bits of V are known to be either zero or one and return
1781 /// them.
1782 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1783                            unsigned Depth, const Query &Q) {
1784   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1785   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1786   return Known;
1787 }
1788 
1789 /// Determine which bits of V are known to be either zero or one and return
1790 /// them.
1791 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1792   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1793   computeKnownBits(V, Known, Depth, Q);
1794   return Known;
1795 }
1796 
1797 /// Determine which bits of V are known to be either zero or one and return
1798 /// them in the Known bit set.
1799 ///
1800 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1801 /// we cannot optimize based on the assumption that it is zero without changing
1802 /// it to be an explicit zero.  If we don't change it to zero, other code could
1803 /// optimized based on the contradictory assumption that it is non-zero.
1804 /// Because instcombine aggressively folds operations with undef args anyway,
1805 /// this won't lose us code quality.
1806 ///
1807 /// This function is defined on values with integer type, values with pointer
1808 /// type, and vectors of integers.  In the case
1809 /// where V is a vector, known zero, and known one values are the
1810 /// same width as the vector element, and the bit is set only if it is true
1811 /// for all of the demanded elements in the vector specified by DemandedElts.
1812 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1813                       KnownBits &Known, unsigned Depth, const Query &Q) {
1814   if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1815     // No demanded elts or V is a scalable vector, better to assume we don't
1816     // know anything.
1817     Known.resetAll();
1818     return;
1819   }
1820 
1821   assert(V && "No Value?");
1822   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1823 
1824 #ifndef NDEBUG
1825   Type *Ty = V->getType();
1826   unsigned BitWidth = Known.getBitWidth();
1827 
1828   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1829          "Not integer or pointer type!");
1830 
1831   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1832     assert(
1833         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1834         "DemandedElt width should equal the fixed vector number of elements");
1835   } else {
1836     assert(DemandedElts == APInt(1, 1) &&
1837            "DemandedElt width should be 1 for scalars");
1838   }
1839 
1840   Type *ScalarTy = Ty->getScalarType();
1841   if (ScalarTy->isPointerTy()) {
1842     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1843            "V and Known should have same BitWidth");
1844   } else {
1845     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1846            "V and Known should have same BitWidth");
1847   }
1848 #endif
1849 
1850   const APInt *C;
1851   if (match(V, m_APInt(C))) {
1852     // We know all of the bits for a scalar constant or a splat vector constant!
1853     Known = KnownBits::makeConstant(*C);
1854     return;
1855   }
1856   // Null and aggregate-zero are all-zeros.
1857   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1858     Known.setAllZero();
1859     return;
1860   }
1861   // Handle a constant vector by taking the intersection of the known bits of
1862   // each element.
1863   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1864     // We know that CDV must be a vector of integers. Take the intersection of
1865     // each element.
1866     Known.Zero.setAllBits(); Known.One.setAllBits();
1867     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1868       if (!DemandedElts[i])
1869         continue;
1870       APInt Elt = CDV->getElementAsAPInt(i);
1871       Known.Zero &= ~Elt;
1872       Known.One &= Elt;
1873     }
1874     return;
1875   }
1876 
1877   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1878     // We know that CV must be a vector of integers. Take the intersection of
1879     // each element.
1880     Known.Zero.setAllBits(); Known.One.setAllBits();
1881     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1882       if (!DemandedElts[i])
1883         continue;
1884       Constant *Element = CV->getAggregateElement(i);
1885       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1886       if (!ElementCI) {
1887         Known.resetAll();
1888         return;
1889       }
1890       const APInt &Elt = ElementCI->getValue();
1891       Known.Zero &= ~Elt;
1892       Known.One &= Elt;
1893     }
1894     return;
1895   }
1896 
1897   // Start out not knowing anything.
1898   Known.resetAll();
1899 
1900   // We can't imply anything about undefs.
1901   if (isa<UndefValue>(V))
1902     return;
1903 
1904   // There's no point in looking through other users of ConstantData for
1905   // assumptions.  Confirm that we've handled them all.
1906   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1907 
1908   // All recursive calls that increase depth must come after this.
1909   if (Depth == MaxAnalysisRecursionDepth)
1910     return;
1911 
1912   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1913   // the bits of its aliasee.
1914   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1915     if (!GA->isInterposable())
1916       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1917     return;
1918   }
1919 
1920   if (const Operator *I = dyn_cast<Operator>(V))
1921     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1922 
1923   // Aligned pointers have trailing zeros - refine Known.Zero set
1924   if (isa<PointerType>(V->getType())) {
1925     Align Alignment = V->getPointerAlignment(Q.DL);
1926     Known.Zero.setLowBits(Log2(Alignment));
1927   }
1928 
1929   // computeKnownBitsFromAssume strictly refines Known.
1930   // Therefore, we run them after computeKnownBitsFromOperator.
1931 
1932   // Check whether a nearby assume intrinsic can determine some known bits.
1933   computeKnownBitsFromAssume(V, Known, Depth, Q);
1934 
1935   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1936 }
1937 
1938 /// Return true if the given value is known to have exactly one
1939 /// bit set when defined. For vectors return true if every element is known to
1940 /// be a power of two when defined. Supports values with integer or pointer
1941 /// types and vectors of integers.
1942 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1943                             const Query &Q) {
1944   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1945 
1946   // Attempt to match against constants.
1947   if (OrZero && match(V, m_Power2OrZero()))
1948       return true;
1949   if (match(V, m_Power2()))
1950       return true;
1951 
1952   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1953   // it is shifted off the end then the result is undefined.
1954   if (match(V, m_Shl(m_One(), m_Value())))
1955     return true;
1956 
1957   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1958   // the bottom.  If it is shifted off the bottom then the result is undefined.
1959   if (match(V, m_LShr(m_SignMask(), m_Value())))
1960     return true;
1961 
1962   // The remaining tests are all recursive, so bail out if we hit the limit.
1963   if (Depth++ == MaxAnalysisRecursionDepth)
1964     return false;
1965 
1966   Value *X = nullptr, *Y = nullptr;
1967   // A shift left or a logical shift right of a power of two is a power of two
1968   // or zero.
1969   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1970                  match(V, m_LShr(m_Value(X), m_Value()))))
1971     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1972 
1973   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1974     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1975 
1976   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1977     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1978            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1979 
1980   // Peek through min/max.
1981   if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
1982     return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
1983            isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
1984   }
1985 
1986   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1987     // A power of two and'd with anything is a power of two or zero.
1988     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1989         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1990       return true;
1991     // X & (-X) is always a power of two or zero.
1992     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1993       return true;
1994     return false;
1995   }
1996 
1997   // Adding a power-of-two or zero to the same power-of-two or zero yields
1998   // either the original power-of-two, a larger power-of-two or zero.
1999   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2000     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2001     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2002         Q.IIQ.hasNoSignedWrap(VOBO)) {
2003       if (match(X, m_And(m_Specific(Y), m_Value())) ||
2004           match(X, m_And(m_Value(), m_Specific(Y))))
2005         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2006           return true;
2007       if (match(Y, m_And(m_Specific(X), m_Value())) ||
2008           match(Y, m_And(m_Value(), m_Specific(X))))
2009         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2010           return true;
2011 
2012       unsigned BitWidth = V->getType()->getScalarSizeInBits();
2013       KnownBits LHSBits(BitWidth);
2014       computeKnownBits(X, LHSBits, Depth, Q);
2015 
2016       KnownBits RHSBits(BitWidth);
2017       computeKnownBits(Y, RHSBits, Depth, Q);
2018       // If i8 V is a power of two or zero:
2019       //  ZeroBits: 1 1 1 0 1 1 1 1
2020       // ~ZeroBits: 0 0 0 1 0 0 0 0
2021       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2022         // If OrZero isn't set, we cannot give back a zero result.
2023         // Make sure either the LHS or RHS has a bit set.
2024         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2025           return true;
2026     }
2027   }
2028 
2029   // An exact divide or right shift can only shift off zero bits, so the result
2030   // is a power of two only if the first operand is a power of two and not
2031   // copying a sign bit (sdiv int_min, 2).
2032   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2033       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2034     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2035                                   Depth, Q);
2036   }
2037 
2038   return false;
2039 }
2040 
2041 /// Test whether a GEP's result is known to be non-null.
2042 ///
2043 /// Uses properties inherent in a GEP to try to determine whether it is known
2044 /// to be non-null.
2045 ///
2046 /// Currently this routine does not support vector GEPs.
2047 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2048                               const Query &Q) {
2049   const Function *F = nullptr;
2050   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2051     F = I->getFunction();
2052 
2053   if (!GEP->isInBounds() ||
2054       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2055     return false;
2056 
2057   // FIXME: Support vector-GEPs.
2058   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2059 
2060   // If the base pointer is non-null, we cannot walk to a null address with an
2061   // inbounds GEP in address space zero.
2062   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2063     return true;
2064 
2065   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2066   // If so, then the GEP cannot produce a null pointer, as doing so would
2067   // inherently violate the inbounds contract within address space zero.
2068   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2069        GTI != GTE; ++GTI) {
2070     // Struct types are easy -- they must always be indexed by a constant.
2071     if (StructType *STy = GTI.getStructTypeOrNull()) {
2072       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2073       unsigned ElementIdx = OpC->getZExtValue();
2074       const StructLayout *SL = Q.DL.getStructLayout(STy);
2075       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2076       if (ElementOffset > 0)
2077         return true;
2078       continue;
2079     }
2080 
2081     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2082     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2083       continue;
2084 
2085     // Fast path the constant operand case both for efficiency and so we don't
2086     // increment Depth when just zipping down an all-constant GEP.
2087     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2088       if (!OpC->isZero())
2089         return true;
2090       continue;
2091     }
2092 
2093     // We post-increment Depth here because while isKnownNonZero increments it
2094     // as well, when we pop back up that increment won't persist. We don't want
2095     // to recurse 10k times just because we have 10k GEP operands. We don't
2096     // bail completely out because we want to handle constant GEPs regardless
2097     // of depth.
2098     if (Depth++ >= MaxAnalysisRecursionDepth)
2099       continue;
2100 
2101     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2102       return true;
2103   }
2104 
2105   return false;
2106 }
2107 
2108 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2109                                                   const Instruction *CtxI,
2110                                                   const DominatorTree *DT) {
2111   if (isa<Constant>(V))
2112     return false;
2113 
2114   if (!CtxI || !DT)
2115     return false;
2116 
2117   unsigned NumUsesExplored = 0;
2118   for (auto *U : V->users()) {
2119     // Avoid massive lists
2120     if (NumUsesExplored >= DomConditionsMaxUses)
2121       break;
2122     NumUsesExplored++;
2123 
2124     // If the value is used as an argument to a call or invoke, then argument
2125     // attributes may provide an answer about null-ness.
2126     if (const auto *CB = dyn_cast<CallBase>(U))
2127       if (auto *CalledFunc = CB->getCalledFunction())
2128         for (const Argument &Arg : CalledFunc->args())
2129           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2130               Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2131               DT->dominates(CB, CtxI))
2132             return true;
2133 
2134     // If the value is used as a load/store, then the pointer must be non null.
2135     if (V == getLoadStorePointerOperand(U)) {
2136       const Instruction *I = cast<Instruction>(U);
2137       if (!NullPointerIsDefined(I->getFunction(),
2138                                 V->getType()->getPointerAddressSpace()) &&
2139           DT->dominates(I, CtxI))
2140         return true;
2141     }
2142 
2143     // Consider only compare instructions uniquely controlling a branch
2144     Value *RHS;
2145     CmpInst::Predicate Pred;
2146     if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2147       continue;
2148 
2149     bool NonNullIfTrue;
2150     if (cmpExcludesZero(Pred, RHS))
2151       NonNullIfTrue = true;
2152     else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2153       NonNullIfTrue = false;
2154     else
2155       continue;
2156 
2157     SmallVector<const User *, 4> WorkList;
2158     SmallPtrSet<const User *, 4> Visited;
2159     for (auto *CmpU : U->users()) {
2160       assert(WorkList.empty() && "Should be!");
2161       if (Visited.insert(CmpU).second)
2162         WorkList.push_back(CmpU);
2163 
2164       while (!WorkList.empty()) {
2165         auto *Curr = WorkList.pop_back_val();
2166 
2167         // If a user is an AND, add all its users to the work list. We only
2168         // propagate "pred != null" condition through AND because it is only
2169         // correct to assume that all conditions of AND are met in true branch.
2170         // TODO: Support similar logic of OR and EQ predicate?
2171         if (NonNullIfTrue)
2172           if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2173             for (auto *CurrU : Curr->users())
2174               if (Visited.insert(CurrU).second)
2175                 WorkList.push_back(CurrU);
2176             continue;
2177           }
2178 
2179         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2180           assert(BI->isConditional() && "uses a comparison!");
2181 
2182           BasicBlock *NonNullSuccessor =
2183               BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2184           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2185           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2186             return true;
2187         } else if (NonNullIfTrue && isGuard(Curr) &&
2188                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2189           return true;
2190         }
2191       }
2192     }
2193   }
2194 
2195   return false;
2196 }
2197 
2198 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2199 /// ensure that the value it's attached to is never Value?  'RangeType' is
2200 /// is the type of the value described by the range.
2201 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2202   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2203   assert(NumRanges >= 1);
2204   for (unsigned i = 0; i < NumRanges; ++i) {
2205     ConstantInt *Lower =
2206         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2207     ConstantInt *Upper =
2208         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2209     ConstantRange Range(Lower->getValue(), Upper->getValue());
2210     if (Range.contains(Value))
2211       return false;
2212   }
2213   return true;
2214 }
2215 
2216 /// Return true if the given value is known to be non-zero when defined. For
2217 /// vectors, return true if every demanded element is known to be non-zero when
2218 /// defined. For pointers, if the context instruction and dominator tree are
2219 /// specified, perform context-sensitive analysis and return true if the
2220 /// pointer couldn't possibly be null at the specified instruction.
2221 /// Supports values with integer or pointer type and vectors of integers.
2222 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2223                     const Query &Q) {
2224   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2225   // vector
2226   if (isa<ScalableVectorType>(V->getType()))
2227     return false;
2228 
2229   if (auto *C = dyn_cast<Constant>(V)) {
2230     if (C->isNullValue())
2231       return false;
2232     if (isa<ConstantInt>(C))
2233       // Must be non-zero due to null test above.
2234       return true;
2235 
2236     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2237       // See the comment for IntToPtr/PtrToInt instructions below.
2238       if (CE->getOpcode() == Instruction::IntToPtr ||
2239           CE->getOpcode() == Instruction::PtrToInt)
2240         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2241                 .getFixedSize() <=
2242             Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2243           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2244     }
2245 
2246     // For constant vectors, check that all elements are undefined or known
2247     // non-zero to determine that the whole vector is known non-zero.
2248     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2249       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2250         if (!DemandedElts[i])
2251           continue;
2252         Constant *Elt = C->getAggregateElement(i);
2253         if (!Elt || Elt->isNullValue())
2254           return false;
2255         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2256           return false;
2257       }
2258       return true;
2259     }
2260 
2261     // A global variable in address space 0 is non null unless extern weak
2262     // or an absolute symbol reference. Other address spaces may have null as a
2263     // valid address for a global, so we can't assume anything.
2264     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2265       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2266           GV->getType()->getAddressSpace() == 0)
2267         return true;
2268     } else
2269       return false;
2270   }
2271 
2272   if (auto *I = dyn_cast<Instruction>(V)) {
2273     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2274       // If the possible ranges don't contain zero, then the value is
2275       // definitely non-zero.
2276       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2277         const APInt ZeroValue(Ty->getBitWidth(), 0);
2278         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2279           return true;
2280       }
2281     }
2282   }
2283 
2284   if (isKnownNonZeroFromAssume(V, Q))
2285     return true;
2286 
2287   // Some of the tests below are recursive, so bail out if we hit the limit.
2288   if (Depth++ >= MaxAnalysisRecursionDepth)
2289     return false;
2290 
2291   // Check for pointer simplifications.
2292 
2293   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2294     // Alloca never returns null, malloc might.
2295     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2296       return true;
2297 
2298     // A byval, inalloca may not be null in a non-default addres space. A
2299     // nonnull argument is assumed never 0.
2300     if (const Argument *A = dyn_cast<Argument>(V)) {
2301       if (((A->hasPassPointeeByValueCopyAttr() &&
2302             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2303            A->hasNonNullAttr()))
2304         return true;
2305     }
2306 
2307     // A Load tagged with nonnull metadata is never null.
2308     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2309       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2310         return true;
2311 
2312     if (const auto *Call = dyn_cast<CallBase>(V)) {
2313       if (Call->isReturnNonNull())
2314         return true;
2315       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2316         return isKnownNonZero(RP, Depth, Q);
2317     }
2318   }
2319 
2320   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2321     return true;
2322 
2323   // Check for recursive pointer simplifications.
2324   if (V->getType()->isPointerTy()) {
2325     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2326     // do not alter the value, or at least not the nullness property of the
2327     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2328     //
2329     // Note that we have to take special care to avoid looking through
2330     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2331     // as casts that can alter the value, e.g., AddrSpaceCasts.
2332     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2333       return isGEPKnownNonNull(GEP, Depth, Q);
2334 
2335     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2336       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2337 
2338     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2339       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2340           Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2341         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2342   }
2343 
2344   // Similar to int2ptr above, we can look through ptr2int here if the cast
2345   // is a no-op or an extend and not a truncate.
2346   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2347     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2348         Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2349       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2350 
2351   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2352 
2353   // X | Y != 0 if X != 0 or Y != 0.
2354   Value *X = nullptr, *Y = nullptr;
2355   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2356     return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2357            isKnownNonZero(Y, DemandedElts, Depth, Q);
2358 
2359   // ext X != 0 if X != 0.
2360   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2361     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2362 
2363   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2364   // if the lowest bit is shifted off the end.
2365   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2366     // shl nuw can't remove any non-zero bits.
2367     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2368     if (Q.IIQ.hasNoUnsignedWrap(BO))
2369       return isKnownNonZero(X, Depth, Q);
2370 
2371     KnownBits Known(BitWidth);
2372     computeKnownBits(X, DemandedElts, Known, Depth, Q);
2373     if (Known.One[0])
2374       return true;
2375   }
2376   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2377   // defined if the sign bit is shifted off the end.
2378   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2379     // shr exact can only shift out zero bits.
2380     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2381     if (BO->isExact())
2382       return isKnownNonZero(X, Depth, Q);
2383 
2384     KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2385     if (Known.isNegative())
2386       return true;
2387 
2388     // If the shifter operand is a constant, and all of the bits shifted
2389     // out are known to be zero, and X is known non-zero then at least one
2390     // non-zero bit must remain.
2391     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2392       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2393       // Is there a known one in the portion not shifted out?
2394       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2395         return true;
2396       // Are all the bits to be shifted out known zero?
2397       if (Known.countMinTrailingZeros() >= ShiftVal)
2398         return isKnownNonZero(X, DemandedElts, Depth, Q);
2399     }
2400   }
2401   // div exact can only produce a zero if the dividend is zero.
2402   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2403     return isKnownNonZero(X, DemandedElts, Depth, Q);
2404   }
2405   // X + Y.
2406   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2407     KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2408     KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2409 
2410     // If X and Y are both non-negative (as signed values) then their sum is not
2411     // zero unless both X and Y are zero.
2412     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2413       if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2414           isKnownNonZero(Y, DemandedElts, Depth, Q))
2415         return true;
2416 
2417     // If X and Y are both negative (as signed values) then their sum is not
2418     // zero unless both X and Y equal INT_MIN.
2419     if (XKnown.isNegative() && YKnown.isNegative()) {
2420       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2421       // The sign bit of X is set.  If some other bit is set then X is not equal
2422       // to INT_MIN.
2423       if (XKnown.One.intersects(Mask))
2424         return true;
2425       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2426       // to INT_MIN.
2427       if (YKnown.One.intersects(Mask))
2428         return true;
2429     }
2430 
2431     // The sum of a non-negative number and a power of two is not zero.
2432     if (XKnown.isNonNegative() &&
2433         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2434       return true;
2435     if (YKnown.isNonNegative() &&
2436         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2437       return true;
2438   }
2439   // X * Y.
2440   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2441     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2442     // If X and Y are non-zero then so is X * Y as long as the multiplication
2443     // does not overflow.
2444     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2445         isKnownNonZero(X, DemandedElts, Depth, Q) &&
2446         isKnownNonZero(Y, DemandedElts, Depth, Q))
2447       return true;
2448   }
2449   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2450   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2451     if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2452         isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2453       return true;
2454   }
2455   // PHI
2456   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2457     // Try and detect a recurrence that monotonically increases from a
2458     // starting value, as these are common as induction variables.
2459     BinaryOperator *BO = nullptr;
2460     Value *Start = nullptr, *Step = nullptr;
2461     const APInt *StartC, *StepC;
2462     if (Q.IIQ.UseInstrInfo && matchSimpleRecurrence(PN, BO, Start, Step) &&
2463         match(Start, m_APInt(StartC)) && match(Step, m_APInt(StepC))) {
2464       if (BO->getOpcode() == Instruction::Add &&
2465           (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2466           StartC->isStrictlyPositive() && !StepC->isNegative())
2467         return true;
2468       if (BO->getOpcode() == Instruction::Mul &&
2469           (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2470           !StartC->isNullValue() && StepC->isStrictlyPositive())
2471         return true;
2472     }
2473     // Check if all incoming values are non-zero using recursion.
2474     Query RecQ = Q;
2475     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2476     return llvm::all_of(PN->operands(), [&](const Use &U) {
2477       if (U.get() == PN)
2478         return true;
2479       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2480       return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2481     });
2482   }
2483   // ExtractElement
2484   else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2485     const Value *Vec = EEI->getVectorOperand();
2486     const Value *Idx = EEI->getIndexOperand();
2487     auto *CIdx = dyn_cast<ConstantInt>(Idx);
2488     if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2489       unsigned NumElts = VecTy->getNumElements();
2490       APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2491       if (CIdx && CIdx->getValue().ult(NumElts))
2492         DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2493       return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2494     }
2495   }
2496   // Freeze
2497   else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2498     auto *Op = FI->getOperand(0);
2499     if (isKnownNonZero(Op, Depth, Q) &&
2500         isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2501       return true;
2502   }
2503 
2504   KnownBits Known(BitWidth);
2505   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2506   return Known.One != 0;
2507 }
2508 
2509 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2510   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2511   // vector
2512   if (isa<ScalableVectorType>(V->getType()))
2513     return false;
2514 
2515   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2516   APInt DemandedElts =
2517       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2518   return isKnownNonZero(V, DemandedElts, Depth, Q);
2519 }
2520 
2521 /// Return true if V2 == V1 + X, where X is known non-zero.
2522 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2523                            const Query &Q) {
2524   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2525   if (!BO || BO->getOpcode() != Instruction::Add)
2526     return false;
2527   Value *Op = nullptr;
2528   if (V2 == BO->getOperand(0))
2529     Op = BO->getOperand(1);
2530   else if (V2 == BO->getOperand(1))
2531     Op = BO->getOperand(0);
2532   else
2533     return false;
2534   return isKnownNonZero(Op, Depth + 1, Q);
2535 }
2536 
2537 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2538 /// the multiplication is nuw or nsw.
2539 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2540                           const Query &Q) {
2541   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2542     const APInt *C;
2543     return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2544            (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2545            !C->isNullValue() && !C->isOneValue() &&
2546            isKnownNonZero(V1, Depth + 1, Q);
2547   }
2548   return false;
2549 }
2550 
2551 /// Return true if it is known that V1 != V2.
2552 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2553                             const Query &Q) {
2554   if (V1 == V2)
2555     return false;
2556   if (V1->getType() != V2->getType())
2557     // We can't look through casts yet.
2558     return false;
2559 
2560   if (Depth >= MaxAnalysisRecursionDepth)
2561     return false;
2562 
2563   // See if we can recurse through (exactly one of) our operands.  This
2564   // requires our operation be 1-to-1 and map every input value to exactly
2565   // one output value.  Such an operation is invertible.
2566   auto *O1 = dyn_cast<Operator>(V1);
2567   auto *O2 = dyn_cast<Operator>(V2);
2568   if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2569     switch (O1->getOpcode()) {
2570     default: break;
2571     case Instruction::Add:
2572     case Instruction::Sub:
2573       // Assume operand order has been canonicalized
2574       if (O1->getOperand(0) == O2->getOperand(0))
2575         return isKnownNonEqual(O1->getOperand(1), O2->getOperand(1),
2576                                Depth + 1, Q);
2577       if (O1->getOperand(1) == O2->getOperand(1))
2578         return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2579                                Depth + 1, Q);
2580       break;
2581     case Instruction::Mul: {
2582       // invertible if A * B == (A * B) mod 2^N where A, and B are integers
2583       // and N is the bitwdith.  The nsw case is non-obvious, but proven by
2584       // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2585       auto *OBO1 = cast<OverflowingBinaryOperator>(O1);
2586       auto *OBO2 = cast<OverflowingBinaryOperator>(O2);
2587       if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2588           (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2589         break;
2590 
2591       // Assume operand order has been canonicalized
2592       if (O1->getOperand(1) == O2->getOperand(1) &&
2593           isa<ConstantInt>(O1->getOperand(1)) &&
2594           !cast<ConstantInt>(O1->getOperand(1))->isZero())
2595         return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2596                                Depth + 1, Q);
2597       break;
2598     }
2599     case Instruction::SExt:
2600     case Instruction::ZExt:
2601       if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType())
2602         return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2603                                Depth + 1, Q);
2604       break;
2605     };
2606   }
2607 
2608   if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2609     return true;
2610 
2611   if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2612     return true;
2613 
2614   if (V1->getType()->isIntOrIntVectorTy()) {
2615     // Are any known bits in V1 contradictory to known bits in V2? If V1
2616     // has a known zero where V2 has a known one, they must not be equal.
2617     KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2618     KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2619 
2620     if (Known1.Zero.intersects(Known2.One) ||
2621         Known2.Zero.intersects(Known1.One))
2622       return true;
2623   }
2624   return false;
2625 }
2626 
2627 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2628 /// simplify operations downstream. Mask is known to be zero for bits that V
2629 /// cannot have.
2630 ///
2631 /// This function is defined on values with integer type, values with pointer
2632 /// type, and vectors of integers.  In the case
2633 /// where V is a vector, the mask, known zero, and known one values are the
2634 /// same width as the vector element, and the bit is set only if it is true
2635 /// for all of the elements in the vector.
2636 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2637                        const Query &Q) {
2638   KnownBits Known(Mask.getBitWidth());
2639   computeKnownBits(V, Known, Depth, Q);
2640   return Mask.isSubsetOf(Known.Zero);
2641 }
2642 
2643 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2644 // Returns the input and lower/upper bounds.
2645 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2646                                 const APInt *&CLow, const APInt *&CHigh) {
2647   assert(isa<Operator>(Select) &&
2648          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2649          "Input should be a Select!");
2650 
2651   const Value *LHS = nullptr, *RHS = nullptr;
2652   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2653   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2654     return false;
2655 
2656   if (!match(RHS, m_APInt(CLow)))
2657     return false;
2658 
2659   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2660   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2661   if (getInverseMinMaxFlavor(SPF) != SPF2)
2662     return false;
2663 
2664   if (!match(RHS2, m_APInt(CHigh)))
2665     return false;
2666 
2667   if (SPF == SPF_SMIN)
2668     std::swap(CLow, CHigh);
2669 
2670   In = LHS2;
2671   return CLow->sle(*CHigh);
2672 }
2673 
2674 /// For vector constants, loop over the elements and find the constant with the
2675 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2676 /// or if any element was not analyzed; otherwise, return the count for the
2677 /// element with the minimum number of sign bits.
2678 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2679                                                  const APInt &DemandedElts,
2680                                                  unsigned TyBits) {
2681   const auto *CV = dyn_cast<Constant>(V);
2682   if (!CV || !isa<FixedVectorType>(CV->getType()))
2683     return 0;
2684 
2685   unsigned MinSignBits = TyBits;
2686   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2687   for (unsigned i = 0; i != NumElts; ++i) {
2688     if (!DemandedElts[i])
2689       continue;
2690     // If we find a non-ConstantInt, bail out.
2691     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2692     if (!Elt)
2693       return 0;
2694 
2695     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2696   }
2697 
2698   return MinSignBits;
2699 }
2700 
2701 static unsigned ComputeNumSignBitsImpl(const Value *V,
2702                                        const APInt &DemandedElts,
2703                                        unsigned Depth, const Query &Q);
2704 
2705 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2706                                    unsigned Depth, const Query &Q) {
2707   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2708   assert(Result > 0 && "At least one sign bit needs to be present!");
2709   return Result;
2710 }
2711 
2712 /// Return the number of times the sign bit of the register is replicated into
2713 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2714 /// (itself), but other cases can give us information. For example, immediately
2715 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2716 /// other, so we return 3. For vectors, return the number of sign bits for the
2717 /// vector element with the minimum number of known sign bits of the demanded
2718 /// elements in the vector specified by DemandedElts.
2719 static unsigned ComputeNumSignBitsImpl(const Value *V,
2720                                        const APInt &DemandedElts,
2721                                        unsigned Depth, const Query &Q) {
2722   Type *Ty = V->getType();
2723 
2724   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2725   // vector
2726   if (isa<ScalableVectorType>(Ty))
2727     return 1;
2728 
2729 #ifndef NDEBUG
2730   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2731 
2732   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2733     assert(
2734         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2735         "DemandedElt width should equal the fixed vector number of elements");
2736   } else {
2737     assert(DemandedElts == APInt(1, 1) &&
2738            "DemandedElt width should be 1 for scalars");
2739   }
2740 #endif
2741 
2742   // We return the minimum number of sign bits that are guaranteed to be present
2743   // in V, so for undef we have to conservatively return 1.  We don't have the
2744   // same behavior for poison though -- that's a FIXME today.
2745 
2746   Type *ScalarTy = Ty->getScalarType();
2747   unsigned TyBits = ScalarTy->isPointerTy() ?
2748     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2749     Q.DL.getTypeSizeInBits(ScalarTy);
2750 
2751   unsigned Tmp, Tmp2;
2752   unsigned FirstAnswer = 1;
2753 
2754   // Note that ConstantInt is handled by the general computeKnownBits case
2755   // below.
2756 
2757   if (Depth == MaxAnalysisRecursionDepth)
2758     return 1;
2759 
2760   if (auto *U = dyn_cast<Operator>(V)) {
2761     switch (Operator::getOpcode(V)) {
2762     default: break;
2763     case Instruction::SExt:
2764       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2765       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2766 
2767     case Instruction::SDiv: {
2768       const APInt *Denominator;
2769       // sdiv X, C -> adds log(C) sign bits.
2770       if (match(U->getOperand(1), m_APInt(Denominator))) {
2771 
2772         // Ignore non-positive denominator.
2773         if (!Denominator->isStrictlyPositive())
2774           break;
2775 
2776         // Calculate the incoming numerator bits.
2777         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2778 
2779         // Add floor(log(C)) bits to the numerator bits.
2780         return std::min(TyBits, NumBits + Denominator->logBase2());
2781       }
2782       break;
2783     }
2784 
2785     case Instruction::SRem: {
2786       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2787 
2788       const APInt *Denominator;
2789       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2790       // positive constant.  This let us put a lower bound on the number of sign
2791       // bits.
2792       if (match(U->getOperand(1), m_APInt(Denominator))) {
2793 
2794         // Ignore non-positive denominator.
2795         if (Denominator->isStrictlyPositive()) {
2796           // Calculate the leading sign bit constraints by examining the
2797           // denominator.  Given that the denominator is positive, there are two
2798           // cases:
2799           //
2800           //  1. The numerator is positive. The result range is [0,C) and
2801           //     [0,C) u< (1 << ceilLogBase2(C)).
2802           //
2803           //  2. The numerator is negative. Then the result range is (-C,0] and
2804           //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2805           //
2806           // Thus a lower bound on the number of sign bits is `TyBits -
2807           // ceilLogBase2(C)`.
2808 
2809           unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2810           Tmp = std::max(Tmp, ResBits);
2811         }
2812       }
2813       return Tmp;
2814     }
2815 
2816     case Instruction::AShr: {
2817       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2818       // ashr X, C   -> adds C sign bits.  Vectors too.
2819       const APInt *ShAmt;
2820       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2821         if (ShAmt->uge(TyBits))
2822           break; // Bad shift.
2823         unsigned ShAmtLimited = ShAmt->getZExtValue();
2824         Tmp += ShAmtLimited;
2825         if (Tmp > TyBits) Tmp = TyBits;
2826       }
2827       return Tmp;
2828     }
2829     case Instruction::Shl: {
2830       const APInt *ShAmt;
2831       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2832         // shl destroys sign bits.
2833         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2834         if (ShAmt->uge(TyBits) ||   // Bad shift.
2835             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2836         Tmp2 = ShAmt->getZExtValue();
2837         return Tmp - Tmp2;
2838       }
2839       break;
2840     }
2841     case Instruction::And:
2842     case Instruction::Or:
2843     case Instruction::Xor: // NOT is handled here.
2844       // Logical binary ops preserve the number of sign bits at the worst.
2845       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2846       if (Tmp != 1) {
2847         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2848         FirstAnswer = std::min(Tmp, Tmp2);
2849         // We computed what we know about the sign bits as our first
2850         // answer. Now proceed to the generic code that uses
2851         // computeKnownBits, and pick whichever answer is better.
2852       }
2853       break;
2854 
2855     case Instruction::Select: {
2856       // If we have a clamp pattern, we know that the number of sign bits will
2857       // be the minimum of the clamp min/max range.
2858       const Value *X;
2859       const APInt *CLow, *CHigh;
2860       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2861         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2862 
2863       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2864       if (Tmp == 1) break;
2865       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2866       return std::min(Tmp, Tmp2);
2867     }
2868 
2869     case Instruction::Add:
2870       // Add can have at most one carry bit.  Thus we know that the output
2871       // is, at worst, one more bit than the inputs.
2872       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2873       if (Tmp == 1) break;
2874 
2875       // Special case decrementing a value (ADD X, -1):
2876       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2877         if (CRHS->isAllOnesValue()) {
2878           KnownBits Known(TyBits);
2879           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2880 
2881           // If the input is known to be 0 or 1, the output is 0/-1, which is
2882           // all sign bits set.
2883           if ((Known.Zero | 1).isAllOnesValue())
2884             return TyBits;
2885 
2886           // If we are subtracting one from a positive number, there is no carry
2887           // out of the result.
2888           if (Known.isNonNegative())
2889             return Tmp;
2890         }
2891 
2892       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2893       if (Tmp2 == 1) break;
2894       return std::min(Tmp, Tmp2) - 1;
2895 
2896     case Instruction::Sub:
2897       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2898       if (Tmp2 == 1) break;
2899 
2900       // Handle NEG.
2901       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2902         if (CLHS->isNullValue()) {
2903           KnownBits Known(TyBits);
2904           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2905           // If the input is known to be 0 or 1, the output is 0/-1, which is
2906           // all sign bits set.
2907           if ((Known.Zero | 1).isAllOnesValue())
2908             return TyBits;
2909 
2910           // If the input is known to be positive (the sign bit is known clear),
2911           // the output of the NEG has the same number of sign bits as the
2912           // input.
2913           if (Known.isNonNegative())
2914             return Tmp2;
2915 
2916           // Otherwise, we treat this like a SUB.
2917         }
2918 
2919       // Sub can have at most one carry bit.  Thus we know that the output
2920       // is, at worst, one more bit than the inputs.
2921       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2922       if (Tmp == 1) break;
2923       return std::min(Tmp, Tmp2) - 1;
2924 
2925     case Instruction::Mul: {
2926       // The output of the Mul can be at most twice the valid bits in the
2927       // inputs.
2928       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2929       if (SignBitsOp0 == 1) break;
2930       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2931       if (SignBitsOp1 == 1) break;
2932       unsigned OutValidBits =
2933           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2934       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2935     }
2936 
2937     case Instruction::PHI: {
2938       const PHINode *PN = cast<PHINode>(U);
2939       unsigned NumIncomingValues = PN->getNumIncomingValues();
2940       // Don't analyze large in-degree PHIs.
2941       if (NumIncomingValues > 4) break;
2942       // Unreachable blocks may have zero-operand PHI nodes.
2943       if (NumIncomingValues == 0) break;
2944 
2945       // Take the minimum of all incoming values.  This can't infinitely loop
2946       // because of our depth threshold.
2947       Query RecQ = Q;
2948       Tmp = TyBits;
2949       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
2950         if (Tmp == 1) return Tmp;
2951         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
2952         Tmp = std::min(
2953             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
2954       }
2955       return Tmp;
2956     }
2957 
2958     case Instruction::Trunc:
2959       // FIXME: it's tricky to do anything useful for this, but it is an
2960       // important case for targets like X86.
2961       break;
2962 
2963     case Instruction::ExtractElement:
2964       // Look through extract element. At the moment we keep this simple and
2965       // skip tracking the specific element. But at least we might find
2966       // information valid for all elements of the vector (for example if vector
2967       // is sign extended, shifted, etc).
2968       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2969 
2970     case Instruction::ShuffleVector: {
2971       // Collect the minimum number of sign bits that are shared by every vector
2972       // element referenced by the shuffle.
2973       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
2974       if (!Shuf) {
2975         // FIXME: Add support for shufflevector constant expressions.
2976         return 1;
2977       }
2978       APInt DemandedLHS, DemandedRHS;
2979       // For undef elements, we don't know anything about the common state of
2980       // the shuffle result.
2981       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
2982         return 1;
2983       Tmp = std::numeric_limits<unsigned>::max();
2984       if (!!DemandedLHS) {
2985         const Value *LHS = Shuf->getOperand(0);
2986         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
2987       }
2988       // If we don't know anything, early out and try computeKnownBits
2989       // fall-back.
2990       if (Tmp == 1)
2991         break;
2992       if (!!DemandedRHS) {
2993         const Value *RHS = Shuf->getOperand(1);
2994         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
2995         Tmp = std::min(Tmp, Tmp2);
2996       }
2997       // If we don't know anything, early out and try computeKnownBits
2998       // fall-back.
2999       if (Tmp == 1)
3000         break;
3001       assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3002       return Tmp;
3003     }
3004     case Instruction::Call: {
3005       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3006         switch (II->getIntrinsicID()) {
3007         default: break;
3008         case Intrinsic::abs:
3009           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3010           if (Tmp == 1) break;
3011 
3012           // Absolute value reduces number of sign bits by at most 1.
3013           return Tmp - 1;
3014         }
3015       }
3016     }
3017     }
3018   }
3019 
3020   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3021   // use this information.
3022 
3023   // If we can examine all elements of a vector constant successfully, we're
3024   // done (we can't do any better than that). If not, keep trying.
3025   if (unsigned VecSignBits =
3026           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3027     return VecSignBits;
3028 
3029   KnownBits Known(TyBits);
3030   computeKnownBits(V, DemandedElts, Known, Depth, Q);
3031 
3032   // If we know that the sign bit is either zero or one, determine the number of
3033   // identical bits in the top of the input value.
3034   return std::max(FirstAnswer, Known.countMinSignBits());
3035 }
3036 
3037 /// This function computes the integer multiple of Base that equals V.
3038 /// If successful, it returns true and returns the multiple in
3039 /// Multiple. If unsuccessful, it returns false. It looks
3040 /// through SExt instructions only if LookThroughSExt is true.
3041 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3042                            bool LookThroughSExt, unsigned Depth) {
3043   assert(V && "No Value?");
3044   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3045   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
3046 
3047   Type *T = V->getType();
3048 
3049   ConstantInt *CI = dyn_cast<ConstantInt>(V);
3050 
3051   if (Base == 0)
3052     return false;
3053 
3054   if (Base == 1) {
3055     Multiple = V;
3056     return true;
3057   }
3058 
3059   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3060   Constant *BaseVal = ConstantInt::get(T, Base);
3061   if (CO && CO == BaseVal) {
3062     // Multiple is 1.
3063     Multiple = ConstantInt::get(T, 1);
3064     return true;
3065   }
3066 
3067   if (CI && CI->getZExtValue() % Base == 0) {
3068     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3069     return true;
3070   }
3071 
3072   if (Depth == MaxAnalysisRecursionDepth) return false;
3073 
3074   Operator *I = dyn_cast<Operator>(V);
3075   if (!I) return false;
3076 
3077   switch (I->getOpcode()) {
3078   default: break;
3079   case Instruction::SExt:
3080     if (!LookThroughSExt) return false;
3081     // otherwise fall through to ZExt
3082     LLVM_FALLTHROUGH;
3083   case Instruction::ZExt:
3084     return ComputeMultiple(I->getOperand(0), Base, Multiple,
3085                            LookThroughSExt, Depth+1);
3086   case Instruction::Shl:
3087   case Instruction::Mul: {
3088     Value *Op0 = I->getOperand(0);
3089     Value *Op1 = I->getOperand(1);
3090 
3091     if (I->getOpcode() == Instruction::Shl) {
3092       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3093       if (!Op1CI) return false;
3094       // Turn Op0 << Op1 into Op0 * 2^Op1
3095       APInt Op1Int = Op1CI->getValue();
3096       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3097       APInt API(Op1Int.getBitWidth(), 0);
3098       API.setBit(BitToSet);
3099       Op1 = ConstantInt::get(V->getContext(), API);
3100     }
3101 
3102     Value *Mul0 = nullptr;
3103     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3104       if (Constant *Op1C = dyn_cast<Constant>(Op1))
3105         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3106           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3107               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3108             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3109           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3110               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3111             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3112 
3113           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3114           Multiple = ConstantExpr::getMul(MulC, Op1C);
3115           return true;
3116         }
3117 
3118       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3119         if (Mul0CI->getValue() == 1) {
3120           // V == Base * Op1, so return Op1
3121           Multiple = Op1;
3122           return true;
3123         }
3124     }
3125 
3126     Value *Mul1 = nullptr;
3127     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3128       if (Constant *Op0C = dyn_cast<Constant>(Op0))
3129         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3130           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3131               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3132             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3133           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3134               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3135             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3136 
3137           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3138           Multiple = ConstantExpr::getMul(MulC, Op0C);
3139           return true;
3140         }
3141 
3142       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3143         if (Mul1CI->getValue() == 1) {
3144           // V == Base * Op0, so return Op0
3145           Multiple = Op0;
3146           return true;
3147         }
3148     }
3149   }
3150   }
3151 
3152   // We could not determine if V is a multiple of Base.
3153   return false;
3154 }
3155 
3156 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3157                                             const TargetLibraryInfo *TLI) {
3158   const Function *F = CB.getCalledFunction();
3159   if (!F)
3160     return Intrinsic::not_intrinsic;
3161 
3162   if (F->isIntrinsic())
3163     return F->getIntrinsicID();
3164 
3165   // We are going to infer semantics of a library function based on mapping it
3166   // to an LLVM intrinsic. Check that the library function is available from
3167   // this callbase and in this environment.
3168   LibFunc Func;
3169   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3170       !CB.onlyReadsMemory())
3171     return Intrinsic::not_intrinsic;
3172 
3173   switch (Func) {
3174   default:
3175     break;
3176   case LibFunc_sin:
3177   case LibFunc_sinf:
3178   case LibFunc_sinl:
3179     return Intrinsic::sin;
3180   case LibFunc_cos:
3181   case LibFunc_cosf:
3182   case LibFunc_cosl:
3183     return Intrinsic::cos;
3184   case LibFunc_exp:
3185   case LibFunc_expf:
3186   case LibFunc_expl:
3187     return Intrinsic::exp;
3188   case LibFunc_exp2:
3189   case LibFunc_exp2f:
3190   case LibFunc_exp2l:
3191     return Intrinsic::exp2;
3192   case LibFunc_log:
3193   case LibFunc_logf:
3194   case LibFunc_logl:
3195     return Intrinsic::log;
3196   case LibFunc_log10:
3197   case LibFunc_log10f:
3198   case LibFunc_log10l:
3199     return Intrinsic::log10;
3200   case LibFunc_log2:
3201   case LibFunc_log2f:
3202   case LibFunc_log2l:
3203     return Intrinsic::log2;
3204   case LibFunc_fabs:
3205   case LibFunc_fabsf:
3206   case LibFunc_fabsl:
3207     return Intrinsic::fabs;
3208   case LibFunc_fmin:
3209   case LibFunc_fminf:
3210   case LibFunc_fminl:
3211     return Intrinsic::minnum;
3212   case LibFunc_fmax:
3213   case LibFunc_fmaxf:
3214   case LibFunc_fmaxl:
3215     return Intrinsic::maxnum;
3216   case LibFunc_copysign:
3217   case LibFunc_copysignf:
3218   case LibFunc_copysignl:
3219     return Intrinsic::copysign;
3220   case LibFunc_floor:
3221   case LibFunc_floorf:
3222   case LibFunc_floorl:
3223     return Intrinsic::floor;
3224   case LibFunc_ceil:
3225   case LibFunc_ceilf:
3226   case LibFunc_ceill:
3227     return Intrinsic::ceil;
3228   case LibFunc_trunc:
3229   case LibFunc_truncf:
3230   case LibFunc_truncl:
3231     return Intrinsic::trunc;
3232   case LibFunc_rint:
3233   case LibFunc_rintf:
3234   case LibFunc_rintl:
3235     return Intrinsic::rint;
3236   case LibFunc_nearbyint:
3237   case LibFunc_nearbyintf:
3238   case LibFunc_nearbyintl:
3239     return Intrinsic::nearbyint;
3240   case LibFunc_round:
3241   case LibFunc_roundf:
3242   case LibFunc_roundl:
3243     return Intrinsic::round;
3244   case LibFunc_roundeven:
3245   case LibFunc_roundevenf:
3246   case LibFunc_roundevenl:
3247     return Intrinsic::roundeven;
3248   case LibFunc_pow:
3249   case LibFunc_powf:
3250   case LibFunc_powl:
3251     return Intrinsic::pow;
3252   case LibFunc_sqrt:
3253   case LibFunc_sqrtf:
3254   case LibFunc_sqrtl:
3255     return Intrinsic::sqrt;
3256   }
3257 
3258   return Intrinsic::not_intrinsic;
3259 }
3260 
3261 /// Return true if we can prove that the specified FP value is never equal to
3262 /// -0.0.
3263 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3264 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3265 ///       the same as +0.0 in floating-point ops.
3266 ///
3267 /// NOTE: this function will need to be revisited when we support non-default
3268 /// rounding modes!
3269 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3270                                 unsigned Depth) {
3271   if (auto *CFP = dyn_cast<ConstantFP>(V))
3272     return !CFP->getValueAPF().isNegZero();
3273 
3274   if (Depth == MaxAnalysisRecursionDepth)
3275     return false;
3276 
3277   auto *Op = dyn_cast<Operator>(V);
3278   if (!Op)
3279     return false;
3280 
3281   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3282   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3283     return true;
3284 
3285   // sitofp and uitofp turn into +0.0 for zero.
3286   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3287     return true;
3288 
3289   if (auto *Call = dyn_cast<CallInst>(Op)) {
3290     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3291     switch (IID) {
3292     default:
3293       break;
3294     // sqrt(-0.0) = -0.0, no other negative results are possible.
3295     case Intrinsic::sqrt:
3296     case Intrinsic::canonicalize:
3297       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3298     // fabs(x) != -0.0
3299     case Intrinsic::fabs:
3300       return true;
3301     }
3302   }
3303 
3304   return false;
3305 }
3306 
3307 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3308 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3309 /// bit despite comparing equal.
3310 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3311                                             const TargetLibraryInfo *TLI,
3312                                             bool SignBitOnly,
3313                                             unsigned Depth) {
3314   // TODO: This function does not do the right thing when SignBitOnly is true
3315   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3316   // which flips the sign bits of NaNs.  See
3317   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3318 
3319   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3320     return !CFP->getValueAPF().isNegative() ||
3321            (!SignBitOnly && CFP->getValueAPF().isZero());
3322   }
3323 
3324   // Handle vector of constants.
3325   if (auto *CV = dyn_cast<Constant>(V)) {
3326     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3327       unsigned NumElts = CVFVTy->getNumElements();
3328       for (unsigned i = 0; i != NumElts; ++i) {
3329         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3330         if (!CFP)
3331           return false;
3332         if (CFP->getValueAPF().isNegative() &&
3333             (SignBitOnly || !CFP->getValueAPF().isZero()))
3334           return false;
3335       }
3336 
3337       // All non-negative ConstantFPs.
3338       return true;
3339     }
3340   }
3341 
3342   if (Depth == MaxAnalysisRecursionDepth)
3343     return false;
3344 
3345   const Operator *I = dyn_cast<Operator>(V);
3346   if (!I)
3347     return false;
3348 
3349   switch (I->getOpcode()) {
3350   default:
3351     break;
3352   // Unsigned integers are always nonnegative.
3353   case Instruction::UIToFP:
3354     return true;
3355   case Instruction::FMul:
3356   case Instruction::FDiv:
3357     // X * X is always non-negative or a NaN.
3358     // X / X is always exactly 1.0 or a NaN.
3359     if (I->getOperand(0) == I->getOperand(1) &&
3360         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3361       return true;
3362 
3363     LLVM_FALLTHROUGH;
3364   case Instruction::FAdd:
3365   case Instruction::FRem:
3366     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3367                                            Depth + 1) &&
3368            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3369                                            Depth + 1);
3370   case Instruction::Select:
3371     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3372                                            Depth + 1) &&
3373            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3374                                            Depth + 1);
3375   case Instruction::FPExt:
3376   case Instruction::FPTrunc:
3377     // Widening/narrowing never change sign.
3378     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3379                                            Depth + 1);
3380   case Instruction::ExtractElement:
3381     // Look through extract element. At the moment we keep this simple and skip
3382     // tracking the specific element. But at least we might find information
3383     // valid for all elements of the vector.
3384     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3385                                            Depth + 1);
3386   case Instruction::Call:
3387     const auto *CI = cast<CallInst>(I);
3388     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3389     switch (IID) {
3390     default:
3391       break;
3392     case Intrinsic::maxnum: {
3393       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3394       auto isPositiveNum = [&](Value *V) {
3395         if (SignBitOnly) {
3396           // With SignBitOnly, this is tricky because the result of
3397           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3398           // a constant strictly greater than 0.0.
3399           const APFloat *C;
3400           return match(V, m_APFloat(C)) &&
3401                  *C > APFloat::getZero(C->getSemantics());
3402         }
3403 
3404         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3405         // maxnum can't be ordered-less-than-zero.
3406         return isKnownNeverNaN(V, TLI) &&
3407                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3408       };
3409 
3410       // TODO: This could be improved. We could also check that neither operand
3411       //       has its sign bit set (and at least 1 is not-NAN?).
3412       return isPositiveNum(V0) || isPositiveNum(V1);
3413     }
3414 
3415     case Intrinsic::maximum:
3416       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3417                                              Depth + 1) ||
3418              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3419                                              Depth + 1);
3420     case Intrinsic::minnum:
3421     case Intrinsic::minimum:
3422       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3423                                              Depth + 1) &&
3424              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3425                                              Depth + 1);
3426     case Intrinsic::exp:
3427     case Intrinsic::exp2:
3428     case Intrinsic::fabs:
3429       return true;
3430 
3431     case Intrinsic::sqrt:
3432       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3433       if (!SignBitOnly)
3434         return true;
3435       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3436                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3437 
3438     case Intrinsic::powi:
3439       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3440         // powi(x,n) is non-negative if n is even.
3441         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3442           return true;
3443       }
3444       // TODO: This is not correct.  Given that exp is an integer, here are the
3445       // ways that pow can return a negative value:
3446       //
3447       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3448       //   pow(-0, exp)   --> -inf if exp is negative odd.
3449       //   pow(-0, exp)   --> -0 if exp is positive odd.
3450       //   pow(-inf, exp) --> -0 if exp is negative odd.
3451       //   pow(-inf, exp) --> -inf if exp is positive odd.
3452       //
3453       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3454       // but we must return false if x == -0.  Unfortunately we do not currently
3455       // have a way of expressing this constraint.  See details in
3456       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3457       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3458                                              Depth + 1);
3459 
3460     case Intrinsic::fma:
3461     case Intrinsic::fmuladd:
3462       // x*x+y is non-negative if y is non-negative.
3463       return I->getOperand(0) == I->getOperand(1) &&
3464              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3465              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3466                                              Depth + 1);
3467     }
3468     break;
3469   }
3470   return false;
3471 }
3472 
3473 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3474                                        const TargetLibraryInfo *TLI) {
3475   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3476 }
3477 
3478 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3479   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3480 }
3481 
3482 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3483                                 unsigned Depth) {
3484   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3485 
3486   // If we're told that infinities won't happen, assume they won't.
3487   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3488     if (FPMathOp->hasNoInfs())
3489       return true;
3490 
3491   // Handle scalar constants.
3492   if (auto *CFP = dyn_cast<ConstantFP>(V))
3493     return !CFP->isInfinity();
3494 
3495   if (Depth == MaxAnalysisRecursionDepth)
3496     return false;
3497 
3498   if (auto *Inst = dyn_cast<Instruction>(V)) {
3499     switch (Inst->getOpcode()) {
3500     case Instruction::Select: {
3501       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3502              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3503     }
3504     case Instruction::SIToFP:
3505     case Instruction::UIToFP: {
3506       // Get width of largest magnitude integer (remove a bit if signed).
3507       // This still works for a signed minimum value because the largest FP
3508       // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3509       int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3510       if (Inst->getOpcode() == Instruction::SIToFP)
3511         --IntSize;
3512 
3513       // If the exponent of the largest finite FP value can hold the largest
3514       // integer, the result of the cast must be finite.
3515       Type *FPTy = Inst->getType()->getScalarType();
3516       return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3517     }
3518     default:
3519       break;
3520     }
3521   }
3522 
3523   // try to handle fixed width vector constants
3524   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3525   if (VFVTy && isa<Constant>(V)) {
3526     // For vectors, verify that each element is not infinity.
3527     unsigned NumElts = VFVTy->getNumElements();
3528     for (unsigned i = 0; i != NumElts; ++i) {
3529       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3530       if (!Elt)
3531         return false;
3532       if (isa<UndefValue>(Elt))
3533         continue;
3534       auto *CElt = dyn_cast<ConstantFP>(Elt);
3535       if (!CElt || CElt->isInfinity())
3536         return false;
3537     }
3538     // All elements were confirmed non-infinity or undefined.
3539     return true;
3540   }
3541 
3542   // was not able to prove that V never contains infinity
3543   return false;
3544 }
3545 
3546 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3547                            unsigned Depth) {
3548   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3549 
3550   // If we're told that NaNs won't happen, assume they won't.
3551   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3552     if (FPMathOp->hasNoNaNs())
3553       return true;
3554 
3555   // Handle scalar constants.
3556   if (auto *CFP = dyn_cast<ConstantFP>(V))
3557     return !CFP->isNaN();
3558 
3559   if (Depth == MaxAnalysisRecursionDepth)
3560     return false;
3561 
3562   if (auto *Inst = dyn_cast<Instruction>(V)) {
3563     switch (Inst->getOpcode()) {
3564     case Instruction::FAdd:
3565     case Instruction::FSub:
3566       // Adding positive and negative infinity produces NaN.
3567       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3568              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3569              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3570               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3571 
3572     case Instruction::FMul:
3573       // Zero multiplied with infinity produces NaN.
3574       // FIXME: If neither side can be zero fmul never produces NaN.
3575       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3576              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3577              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3578              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3579 
3580     case Instruction::FDiv:
3581     case Instruction::FRem:
3582       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3583       return false;
3584 
3585     case Instruction::Select: {
3586       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3587              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3588     }
3589     case Instruction::SIToFP:
3590     case Instruction::UIToFP:
3591       return true;
3592     case Instruction::FPTrunc:
3593     case Instruction::FPExt:
3594       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3595     default:
3596       break;
3597     }
3598   }
3599 
3600   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3601     switch (II->getIntrinsicID()) {
3602     case Intrinsic::canonicalize:
3603     case Intrinsic::fabs:
3604     case Intrinsic::copysign:
3605     case Intrinsic::exp:
3606     case Intrinsic::exp2:
3607     case Intrinsic::floor:
3608     case Intrinsic::ceil:
3609     case Intrinsic::trunc:
3610     case Intrinsic::rint:
3611     case Intrinsic::nearbyint:
3612     case Intrinsic::round:
3613     case Intrinsic::roundeven:
3614       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3615     case Intrinsic::sqrt:
3616       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3617              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3618     case Intrinsic::minnum:
3619     case Intrinsic::maxnum:
3620       // If either operand is not NaN, the result is not NaN.
3621       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3622              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3623     default:
3624       return false;
3625     }
3626   }
3627 
3628   // Try to handle fixed width vector constants
3629   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3630   if (VFVTy && isa<Constant>(V)) {
3631     // For vectors, verify that each element is not NaN.
3632     unsigned NumElts = VFVTy->getNumElements();
3633     for (unsigned i = 0; i != NumElts; ++i) {
3634       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3635       if (!Elt)
3636         return false;
3637       if (isa<UndefValue>(Elt))
3638         continue;
3639       auto *CElt = dyn_cast<ConstantFP>(Elt);
3640       if (!CElt || CElt->isNaN())
3641         return false;
3642     }
3643     // All elements were confirmed not-NaN or undefined.
3644     return true;
3645   }
3646 
3647   // Was not able to prove that V never contains NaN
3648   return false;
3649 }
3650 
3651 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3652 
3653   // All byte-wide stores are splatable, even of arbitrary variables.
3654   if (V->getType()->isIntegerTy(8))
3655     return V;
3656 
3657   LLVMContext &Ctx = V->getContext();
3658 
3659   // Undef don't care.
3660   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3661   if (isa<UndefValue>(V))
3662     return UndefInt8;
3663 
3664   // Return Undef for zero-sized type.
3665   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3666     return UndefInt8;
3667 
3668   Constant *C = dyn_cast<Constant>(V);
3669   if (!C) {
3670     // Conceptually, we could handle things like:
3671     //   %a = zext i8 %X to i16
3672     //   %b = shl i16 %a, 8
3673     //   %c = or i16 %a, %b
3674     // but until there is an example that actually needs this, it doesn't seem
3675     // worth worrying about.
3676     return nullptr;
3677   }
3678 
3679   // Handle 'null' ConstantArrayZero etc.
3680   if (C->isNullValue())
3681     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3682 
3683   // Constant floating-point values can be handled as integer values if the
3684   // corresponding integer value is "byteable".  An important case is 0.0.
3685   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3686     Type *Ty = nullptr;
3687     if (CFP->getType()->isHalfTy())
3688       Ty = Type::getInt16Ty(Ctx);
3689     else if (CFP->getType()->isFloatTy())
3690       Ty = Type::getInt32Ty(Ctx);
3691     else if (CFP->getType()->isDoubleTy())
3692       Ty = Type::getInt64Ty(Ctx);
3693     // Don't handle long double formats, which have strange constraints.
3694     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3695               : nullptr;
3696   }
3697 
3698   // We can handle constant integers that are multiple of 8 bits.
3699   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3700     if (CI->getBitWidth() % 8 == 0) {
3701       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3702       if (!CI->getValue().isSplat(8))
3703         return nullptr;
3704       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3705     }
3706   }
3707 
3708   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3709     if (CE->getOpcode() == Instruction::IntToPtr) {
3710       if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3711         unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3712         return isBytewiseValue(
3713             ConstantExpr::getIntegerCast(CE->getOperand(0),
3714                                          Type::getIntNTy(Ctx, BitWidth), false),
3715             DL);
3716       }
3717     }
3718   }
3719 
3720   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3721     if (LHS == RHS)
3722       return LHS;
3723     if (!LHS || !RHS)
3724       return nullptr;
3725     if (LHS == UndefInt8)
3726       return RHS;
3727     if (RHS == UndefInt8)
3728       return LHS;
3729     return nullptr;
3730   };
3731 
3732   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3733     Value *Val = UndefInt8;
3734     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3735       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3736         return nullptr;
3737     return Val;
3738   }
3739 
3740   if (isa<ConstantAggregate>(C)) {
3741     Value *Val = UndefInt8;
3742     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3743       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3744         return nullptr;
3745     return Val;
3746   }
3747 
3748   // Don't try to handle the handful of other constants.
3749   return nullptr;
3750 }
3751 
3752 // This is the recursive version of BuildSubAggregate. It takes a few different
3753 // arguments. Idxs is the index within the nested struct From that we are
3754 // looking at now (which is of type IndexedType). IdxSkip is the number of
3755 // indices from Idxs that should be left out when inserting into the resulting
3756 // struct. To is the result struct built so far, new insertvalue instructions
3757 // build on that.
3758 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3759                                 SmallVectorImpl<unsigned> &Idxs,
3760                                 unsigned IdxSkip,
3761                                 Instruction *InsertBefore) {
3762   StructType *STy = dyn_cast<StructType>(IndexedType);
3763   if (STy) {
3764     // Save the original To argument so we can modify it
3765     Value *OrigTo = To;
3766     // General case, the type indexed by Idxs is a struct
3767     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3768       // Process each struct element recursively
3769       Idxs.push_back(i);
3770       Value *PrevTo = To;
3771       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3772                              InsertBefore);
3773       Idxs.pop_back();
3774       if (!To) {
3775         // Couldn't find any inserted value for this index? Cleanup
3776         while (PrevTo != OrigTo) {
3777           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3778           PrevTo = Del->getAggregateOperand();
3779           Del->eraseFromParent();
3780         }
3781         // Stop processing elements
3782         break;
3783       }
3784     }
3785     // If we successfully found a value for each of our subaggregates
3786     if (To)
3787       return To;
3788   }
3789   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3790   // the struct's elements had a value that was inserted directly. In the latter
3791   // case, perhaps we can't determine each of the subelements individually, but
3792   // we might be able to find the complete struct somewhere.
3793 
3794   // Find the value that is at that particular spot
3795   Value *V = FindInsertedValue(From, Idxs);
3796 
3797   if (!V)
3798     return nullptr;
3799 
3800   // Insert the value in the new (sub) aggregate
3801   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3802                                  "tmp", InsertBefore);
3803 }
3804 
3805 // This helper takes a nested struct and extracts a part of it (which is again a
3806 // struct) into a new value. For example, given the struct:
3807 // { a, { b, { c, d }, e } }
3808 // and the indices "1, 1" this returns
3809 // { c, d }.
3810 //
3811 // It does this by inserting an insertvalue for each element in the resulting
3812 // struct, as opposed to just inserting a single struct. This will only work if
3813 // each of the elements of the substruct are known (ie, inserted into From by an
3814 // insertvalue instruction somewhere).
3815 //
3816 // All inserted insertvalue instructions are inserted before InsertBefore
3817 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3818                                 Instruction *InsertBefore) {
3819   assert(InsertBefore && "Must have someplace to insert!");
3820   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3821                                                              idx_range);
3822   Value *To = UndefValue::get(IndexedType);
3823   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3824   unsigned IdxSkip = Idxs.size();
3825 
3826   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3827 }
3828 
3829 /// Given an aggregate and a sequence of indices, see if the scalar value
3830 /// indexed is already around as a register, for example if it was inserted
3831 /// directly into the aggregate.
3832 ///
3833 /// If InsertBefore is not null, this function will duplicate (modified)
3834 /// insertvalues when a part of a nested struct is extracted.
3835 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3836                                Instruction *InsertBefore) {
3837   // Nothing to index? Just return V then (this is useful at the end of our
3838   // recursion).
3839   if (idx_range.empty())
3840     return V;
3841   // We have indices, so V should have an indexable type.
3842   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3843          "Not looking at a struct or array?");
3844   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3845          "Invalid indices for type?");
3846 
3847   if (Constant *C = dyn_cast<Constant>(V)) {
3848     C = C->getAggregateElement(idx_range[0]);
3849     if (!C) return nullptr;
3850     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3851   }
3852 
3853   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3854     // Loop the indices for the insertvalue instruction in parallel with the
3855     // requested indices
3856     const unsigned *req_idx = idx_range.begin();
3857     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3858          i != e; ++i, ++req_idx) {
3859       if (req_idx == idx_range.end()) {
3860         // We can't handle this without inserting insertvalues
3861         if (!InsertBefore)
3862           return nullptr;
3863 
3864         // The requested index identifies a part of a nested aggregate. Handle
3865         // this specially. For example,
3866         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3867         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3868         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3869         // This can be changed into
3870         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3871         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3872         // which allows the unused 0,0 element from the nested struct to be
3873         // removed.
3874         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3875                                  InsertBefore);
3876       }
3877 
3878       // This insert value inserts something else than what we are looking for.
3879       // See if the (aggregate) value inserted into has the value we are
3880       // looking for, then.
3881       if (*req_idx != *i)
3882         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3883                                  InsertBefore);
3884     }
3885     // If we end up here, the indices of the insertvalue match with those
3886     // requested (though possibly only partially). Now we recursively look at
3887     // the inserted value, passing any remaining indices.
3888     return FindInsertedValue(I->getInsertedValueOperand(),
3889                              makeArrayRef(req_idx, idx_range.end()),
3890                              InsertBefore);
3891   }
3892 
3893   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3894     // If we're extracting a value from an aggregate that was extracted from
3895     // something else, we can extract from that something else directly instead.
3896     // However, we will need to chain I's indices with the requested indices.
3897 
3898     // Calculate the number of indices required
3899     unsigned size = I->getNumIndices() + idx_range.size();
3900     // Allocate some space to put the new indices in
3901     SmallVector<unsigned, 5> Idxs;
3902     Idxs.reserve(size);
3903     // Add indices from the extract value instruction
3904     Idxs.append(I->idx_begin(), I->idx_end());
3905 
3906     // Add requested indices
3907     Idxs.append(idx_range.begin(), idx_range.end());
3908 
3909     assert(Idxs.size() == size
3910            && "Number of indices added not correct?");
3911 
3912     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3913   }
3914   // Otherwise, we don't know (such as, extracting from a function return value
3915   // or load instruction)
3916   return nullptr;
3917 }
3918 
3919 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3920                                        unsigned CharSize) {
3921   // Make sure the GEP has exactly three arguments.
3922   if (GEP->getNumOperands() != 3)
3923     return false;
3924 
3925   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3926   // CharSize.
3927   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3928   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3929     return false;
3930 
3931   // Check to make sure that the first operand of the GEP is an integer and
3932   // has value 0 so that we are sure we're indexing into the initializer.
3933   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3934   if (!FirstIdx || !FirstIdx->isZero())
3935     return false;
3936 
3937   return true;
3938 }
3939 
3940 bool llvm::getConstantDataArrayInfo(const Value *V,
3941                                     ConstantDataArraySlice &Slice,
3942                                     unsigned ElementSize, uint64_t Offset) {
3943   assert(V);
3944 
3945   // Look through bitcast instructions and geps.
3946   V = V->stripPointerCasts();
3947 
3948   // If the value is a GEP instruction or constant expression, treat it as an
3949   // offset.
3950   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3951     // The GEP operator should be based on a pointer to string constant, and is
3952     // indexing into the string constant.
3953     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3954       return false;
3955 
3956     // If the second index isn't a ConstantInt, then this is a variable index
3957     // into the array.  If this occurs, we can't say anything meaningful about
3958     // the string.
3959     uint64_t StartIdx = 0;
3960     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3961       StartIdx = CI->getZExtValue();
3962     else
3963       return false;
3964     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3965                                     StartIdx + Offset);
3966   }
3967 
3968   // The GEP instruction, constant or instruction, must reference a global
3969   // variable that is a constant and is initialized. The referenced constant
3970   // initializer is the array that we'll use for optimization.
3971   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3972   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3973     return false;
3974 
3975   const ConstantDataArray *Array;
3976   ArrayType *ArrayTy;
3977   if (GV->getInitializer()->isNullValue()) {
3978     Type *GVTy = GV->getValueType();
3979     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3980       // A zeroinitializer for the array; there is no ConstantDataArray.
3981       Array = nullptr;
3982     } else {
3983       const DataLayout &DL = GV->getParent()->getDataLayout();
3984       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
3985       uint64_t Length = SizeInBytes / (ElementSize / 8);
3986       if (Length <= Offset)
3987         return false;
3988 
3989       Slice.Array = nullptr;
3990       Slice.Offset = 0;
3991       Slice.Length = Length - Offset;
3992       return true;
3993     }
3994   } else {
3995     // This must be a ConstantDataArray.
3996     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3997     if (!Array)
3998       return false;
3999     ArrayTy = Array->getType();
4000   }
4001   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4002     return false;
4003 
4004   uint64_t NumElts = ArrayTy->getArrayNumElements();
4005   if (Offset > NumElts)
4006     return false;
4007 
4008   Slice.Array = Array;
4009   Slice.Offset = Offset;
4010   Slice.Length = NumElts - Offset;
4011   return true;
4012 }
4013 
4014 /// This function computes the length of a null-terminated C string pointed to
4015 /// by V. If successful, it returns true and returns the string in Str.
4016 /// If unsuccessful, it returns false.
4017 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4018                                  uint64_t Offset, bool TrimAtNul) {
4019   ConstantDataArraySlice Slice;
4020   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4021     return false;
4022 
4023   if (Slice.Array == nullptr) {
4024     if (TrimAtNul) {
4025       Str = StringRef();
4026       return true;
4027     }
4028     if (Slice.Length == 1) {
4029       Str = StringRef("", 1);
4030       return true;
4031     }
4032     // We cannot instantiate a StringRef as we do not have an appropriate string
4033     // of 0s at hand.
4034     return false;
4035   }
4036 
4037   // Start out with the entire array in the StringRef.
4038   Str = Slice.Array->getAsString();
4039   // Skip over 'offset' bytes.
4040   Str = Str.substr(Slice.Offset);
4041 
4042   if (TrimAtNul) {
4043     // Trim off the \0 and anything after it.  If the array is not nul
4044     // terminated, we just return the whole end of string.  The client may know
4045     // some other way that the string is length-bound.
4046     Str = Str.substr(0, Str.find('\0'));
4047   }
4048   return true;
4049 }
4050 
4051 // These next two are very similar to the above, but also look through PHI
4052 // nodes.
4053 // TODO: See if we can integrate these two together.
4054 
4055 /// If we can compute the length of the string pointed to by
4056 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4057 static uint64_t GetStringLengthH(const Value *V,
4058                                  SmallPtrSetImpl<const PHINode*> &PHIs,
4059                                  unsigned CharSize) {
4060   // Look through noop bitcast instructions.
4061   V = V->stripPointerCasts();
4062 
4063   // If this is a PHI node, there are two cases: either we have already seen it
4064   // or we haven't.
4065   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4066     if (!PHIs.insert(PN).second)
4067       return ~0ULL;  // already in the set.
4068 
4069     // If it was new, see if all the input strings are the same length.
4070     uint64_t LenSoFar = ~0ULL;
4071     for (Value *IncValue : PN->incoming_values()) {
4072       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4073       if (Len == 0) return 0; // Unknown length -> unknown.
4074 
4075       if (Len == ~0ULL) continue;
4076 
4077       if (Len != LenSoFar && LenSoFar != ~0ULL)
4078         return 0;    // Disagree -> unknown.
4079       LenSoFar = Len;
4080     }
4081 
4082     // Success, all agree.
4083     return LenSoFar;
4084   }
4085 
4086   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4087   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4088     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4089     if (Len1 == 0) return 0;
4090     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4091     if (Len2 == 0) return 0;
4092     if (Len1 == ~0ULL) return Len2;
4093     if (Len2 == ~0ULL) return Len1;
4094     if (Len1 != Len2) return 0;
4095     return Len1;
4096   }
4097 
4098   // Otherwise, see if we can read the string.
4099   ConstantDataArraySlice Slice;
4100   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4101     return 0;
4102 
4103   if (Slice.Array == nullptr)
4104     return 1;
4105 
4106   // Search for nul characters
4107   unsigned NullIndex = 0;
4108   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4109     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4110       break;
4111   }
4112 
4113   return NullIndex + 1;
4114 }
4115 
4116 /// If we can compute the length of the string pointed to by
4117 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4118 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4119   if (!V->getType()->isPointerTy())
4120     return 0;
4121 
4122   SmallPtrSet<const PHINode*, 32> PHIs;
4123   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4124   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4125   // an empty string as a length.
4126   return Len == ~0ULL ? 1 : Len;
4127 }
4128 
4129 const Value *
4130 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4131                                            bool MustPreserveNullness) {
4132   assert(Call &&
4133          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4134   if (const Value *RV = Call->getReturnedArgOperand())
4135     return RV;
4136   // This can be used only as a aliasing property.
4137   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4138           Call, MustPreserveNullness))
4139     return Call->getArgOperand(0);
4140   return nullptr;
4141 }
4142 
4143 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4144     const CallBase *Call, bool MustPreserveNullness) {
4145   switch (Call->getIntrinsicID()) {
4146   case Intrinsic::launder_invariant_group:
4147   case Intrinsic::strip_invariant_group:
4148   case Intrinsic::aarch64_irg:
4149   case Intrinsic::aarch64_tagp:
4150     return true;
4151   case Intrinsic::ptrmask:
4152     return !MustPreserveNullness;
4153   default:
4154     return false;
4155   }
4156 }
4157 
4158 /// \p PN defines a loop-variant pointer to an object.  Check if the
4159 /// previous iteration of the loop was referring to the same object as \p PN.
4160 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4161                                          const LoopInfo *LI) {
4162   // Find the loop-defined value.
4163   Loop *L = LI->getLoopFor(PN->getParent());
4164   if (PN->getNumIncomingValues() != 2)
4165     return true;
4166 
4167   // Find the value from previous iteration.
4168   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4169   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4170     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4171   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4172     return true;
4173 
4174   // If a new pointer is loaded in the loop, the pointer references a different
4175   // object in every iteration.  E.g.:
4176   //    for (i)
4177   //       int *p = a[i];
4178   //       ...
4179   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4180     if (!L->isLoopInvariant(Load->getPointerOperand()))
4181       return false;
4182   return true;
4183 }
4184 
4185 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4186   if (!V->getType()->isPointerTy())
4187     return V;
4188   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4189     if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4190       V = GEP->getPointerOperand();
4191     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4192                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4193       V = cast<Operator>(V)->getOperand(0);
4194       if (!V->getType()->isPointerTy())
4195         return V;
4196     } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4197       if (GA->isInterposable())
4198         return V;
4199       V = GA->getAliasee();
4200     } else {
4201       if (auto *PHI = dyn_cast<PHINode>(V)) {
4202         // Look through single-arg phi nodes created by LCSSA.
4203         if (PHI->getNumIncomingValues() == 1) {
4204           V = PHI->getIncomingValue(0);
4205           continue;
4206         }
4207       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4208         // CaptureTracking can know about special capturing properties of some
4209         // intrinsics like launder.invariant.group, that can't be expressed with
4210         // the attributes, but have properties like returning aliasing pointer.
4211         // Because some analysis may assume that nocaptured pointer is not
4212         // returned from some special intrinsic (because function would have to
4213         // be marked with returns attribute), it is crucial to use this function
4214         // because it should be in sync with CaptureTracking. Not using it may
4215         // cause weird miscompilations where 2 aliasing pointers are assumed to
4216         // noalias.
4217         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4218           V = RP;
4219           continue;
4220         }
4221       }
4222 
4223       return V;
4224     }
4225     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4226   }
4227   return V;
4228 }
4229 
4230 void llvm::getUnderlyingObjects(const Value *V,
4231                                 SmallVectorImpl<const Value *> &Objects,
4232                                 LoopInfo *LI, unsigned MaxLookup) {
4233   SmallPtrSet<const Value *, 4> Visited;
4234   SmallVector<const Value *, 4> Worklist;
4235   Worklist.push_back(V);
4236   do {
4237     const Value *P = Worklist.pop_back_val();
4238     P = getUnderlyingObject(P, MaxLookup);
4239 
4240     if (!Visited.insert(P).second)
4241       continue;
4242 
4243     if (auto *SI = dyn_cast<SelectInst>(P)) {
4244       Worklist.push_back(SI->getTrueValue());
4245       Worklist.push_back(SI->getFalseValue());
4246       continue;
4247     }
4248 
4249     if (auto *PN = dyn_cast<PHINode>(P)) {
4250       // If this PHI changes the underlying object in every iteration of the
4251       // loop, don't look through it.  Consider:
4252       //   int **A;
4253       //   for (i) {
4254       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4255       //     Curr = A[i];
4256       //     *Prev, *Curr;
4257       //
4258       // Prev is tracking Curr one iteration behind so they refer to different
4259       // underlying objects.
4260       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4261           isSameUnderlyingObjectInLoop(PN, LI))
4262         append_range(Worklist, PN->incoming_values());
4263       continue;
4264     }
4265 
4266     Objects.push_back(P);
4267   } while (!Worklist.empty());
4268 }
4269 
4270 /// This is the function that does the work of looking through basic
4271 /// ptrtoint+arithmetic+inttoptr sequences.
4272 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4273   do {
4274     if (const Operator *U = dyn_cast<Operator>(V)) {
4275       // If we find a ptrtoint, we can transfer control back to the
4276       // regular getUnderlyingObjectFromInt.
4277       if (U->getOpcode() == Instruction::PtrToInt)
4278         return U->getOperand(0);
4279       // If we find an add of a constant, a multiplied value, or a phi, it's
4280       // likely that the other operand will lead us to the base
4281       // object. We don't have to worry about the case where the
4282       // object address is somehow being computed by the multiply,
4283       // because our callers only care when the result is an
4284       // identifiable object.
4285       if (U->getOpcode() != Instruction::Add ||
4286           (!isa<ConstantInt>(U->getOperand(1)) &&
4287            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4288            !isa<PHINode>(U->getOperand(1))))
4289         return V;
4290       V = U->getOperand(0);
4291     } else {
4292       return V;
4293     }
4294     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4295   } while (true);
4296 }
4297 
4298 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4299 /// ptrtoint+arithmetic+inttoptr sequences.
4300 /// It returns false if unidentified object is found in getUnderlyingObjects.
4301 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4302                                           SmallVectorImpl<Value *> &Objects) {
4303   SmallPtrSet<const Value *, 16> Visited;
4304   SmallVector<const Value *, 4> Working(1, V);
4305   do {
4306     V = Working.pop_back_val();
4307 
4308     SmallVector<const Value *, 4> Objs;
4309     getUnderlyingObjects(V, Objs);
4310 
4311     for (const Value *V : Objs) {
4312       if (!Visited.insert(V).second)
4313         continue;
4314       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4315         const Value *O =
4316           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4317         if (O->getType()->isPointerTy()) {
4318           Working.push_back(O);
4319           continue;
4320         }
4321       }
4322       // If getUnderlyingObjects fails to find an identifiable object,
4323       // getUnderlyingObjectsForCodeGen also fails for safety.
4324       if (!isIdentifiedObject(V)) {
4325         Objects.clear();
4326         return false;
4327       }
4328       Objects.push_back(const_cast<Value *>(V));
4329     }
4330   } while (!Working.empty());
4331   return true;
4332 }
4333 
4334 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4335   AllocaInst *Result = nullptr;
4336   SmallPtrSet<Value *, 4> Visited;
4337   SmallVector<Value *, 4> Worklist;
4338 
4339   auto AddWork = [&](Value *V) {
4340     if (Visited.insert(V).second)
4341       Worklist.push_back(V);
4342   };
4343 
4344   AddWork(V);
4345   do {
4346     V = Worklist.pop_back_val();
4347     assert(Visited.count(V));
4348 
4349     if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4350       if (Result && Result != AI)
4351         return nullptr;
4352       Result = AI;
4353     } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4354       AddWork(CI->getOperand(0));
4355     } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4356       for (Value *IncValue : PN->incoming_values())
4357         AddWork(IncValue);
4358     } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4359       AddWork(SI->getTrueValue());
4360       AddWork(SI->getFalseValue());
4361     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4362       if (OffsetZero && !GEP->hasAllZeroIndices())
4363         return nullptr;
4364       AddWork(GEP->getPointerOperand());
4365     } else {
4366       return nullptr;
4367     }
4368   } while (!Worklist.empty());
4369 
4370   return Result;
4371 }
4372 
4373 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4374     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4375   for (const User *U : V->users()) {
4376     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4377     if (!II)
4378       return false;
4379 
4380     if (AllowLifetime && II->isLifetimeStartOrEnd())
4381       continue;
4382 
4383     if (AllowDroppable && II->isDroppable())
4384       continue;
4385 
4386     return false;
4387   }
4388   return true;
4389 }
4390 
4391 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4392   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4393       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4394 }
4395 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4396   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4397       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4398 }
4399 
4400 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4401   if (!LI.isUnordered())
4402     return true;
4403   const Function &F = *LI.getFunction();
4404   // Speculative load may create a race that did not exist in the source.
4405   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4406     // Speculative load may load data from dirty regions.
4407     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4408     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4409 }
4410 
4411 
4412 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4413                                         const Instruction *CtxI,
4414                                         const DominatorTree *DT,
4415                                         const TargetLibraryInfo *TLI) {
4416   const Operator *Inst = dyn_cast<Operator>(V);
4417   if (!Inst)
4418     return false;
4419 
4420   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4421     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4422       if (C->canTrap())
4423         return false;
4424 
4425   switch (Inst->getOpcode()) {
4426   default:
4427     return true;
4428   case Instruction::UDiv:
4429   case Instruction::URem: {
4430     // x / y is undefined if y == 0.
4431     const APInt *V;
4432     if (match(Inst->getOperand(1), m_APInt(V)))
4433       return *V != 0;
4434     return false;
4435   }
4436   case Instruction::SDiv:
4437   case Instruction::SRem: {
4438     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4439     const APInt *Numerator, *Denominator;
4440     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4441       return false;
4442     // We cannot hoist this division if the denominator is 0.
4443     if (*Denominator == 0)
4444       return false;
4445     // It's safe to hoist if the denominator is not 0 or -1.
4446     if (!Denominator->isAllOnesValue())
4447       return true;
4448     // At this point we know that the denominator is -1.  It is safe to hoist as
4449     // long we know that the numerator is not INT_MIN.
4450     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4451       return !Numerator->isMinSignedValue();
4452     // The numerator *might* be MinSignedValue.
4453     return false;
4454   }
4455   case Instruction::Load: {
4456     const LoadInst *LI = cast<LoadInst>(Inst);
4457     if (mustSuppressSpeculation(*LI))
4458       return false;
4459     const DataLayout &DL = LI->getModule()->getDataLayout();
4460     return isDereferenceableAndAlignedPointer(
4461         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4462         DL, CtxI, DT, TLI);
4463   }
4464   case Instruction::Call: {
4465     auto *CI = cast<const CallInst>(Inst);
4466     const Function *Callee = CI->getCalledFunction();
4467 
4468     // The called function could have undefined behavior or side-effects, even
4469     // if marked readnone nounwind.
4470     return Callee && Callee->isSpeculatable();
4471   }
4472   case Instruction::VAArg:
4473   case Instruction::Alloca:
4474   case Instruction::Invoke:
4475   case Instruction::CallBr:
4476   case Instruction::PHI:
4477   case Instruction::Store:
4478   case Instruction::Ret:
4479   case Instruction::Br:
4480   case Instruction::IndirectBr:
4481   case Instruction::Switch:
4482   case Instruction::Unreachable:
4483   case Instruction::Fence:
4484   case Instruction::AtomicRMW:
4485   case Instruction::AtomicCmpXchg:
4486   case Instruction::LandingPad:
4487   case Instruction::Resume:
4488   case Instruction::CatchSwitch:
4489   case Instruction::CatchPad:
4490   case Instruction::CatchRet:
4491   case Instruction::CleanupPad:
4492   case Instruction::CleanupRet:
4493     return false; // Misc instructions which have effects
4494   }
4495 }
4496 
4497 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4498   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4499 }
4500 
4501 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4502 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4503   switch (OR) {
4504     case ConstantRange::OverflowResult::MayOverflow:
4505       return OverflowResult::MayOverflow;
4506     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4507       return OverflowResult::AlwaysOverflowsLow;
4508     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4509       return OverflowResult::AlwaysOverflowsHigh;
4510     case ConstantRange::OverflowResult::NeverOverflows:
4511       return OverflowResult::NeverOverflows;
4512   }
4513   llvm_unreachable("Unknown OverflowResult");
4514 }
4515 
4516 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4517 static ConstantRange computeConstantRangeIncludingKnownBits(
4518     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4519     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4520     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4521   KnownBits Known = computeKnownBits(
4522       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4523   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4524   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4525   ConstantRange::PreferredRangeType RangeType =
4526       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4527   return CR1.intersectWith(CR2, RangeType);
4528 }
4529 
4530 OverflowResult llvm::computeOverflowForUnsignedMul(
4531     const Value *LHS, const Value *RHS, const DataLayout &DL,
4532     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4533     bool UseInstrInfo) {
4534   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4535                                         nullptr, UseInstrInfo);
4536   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4537                                         nullptr, UseInstrInfo);
4538   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4539   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4540   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4541 }
4542 
4543 OverflowResult
4544 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4545                                   const DataLayout &DL, AssumptionCache *AC,
4546                                   const Instruction *CxtI,
4547                                   const DominatorTree *DT, bool UseInstrInfo) {
4548   // Multiplying n * m significant bits yields a result of n + m significant
4549   // bits. If the total number of significant bits does not exceed the
4550   // result bit width (minus 1), there is no overflow.
4551   // This means if we have enough leading sign bits in the operands
4552   // we can guarantee that the result does not overflow.
4553   // Ref: "Hacker's Delight" by Henry Warren
4554   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4555 
4556   // Note that underestimating the number of sign bits gives a more
4557   // conservative answer.
4558   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4559                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4560 
4561   // First handle the easy case: if we have enough sign bits there's
4562   // definitely no overflow.
4563   if (SignBits > BitWidth + 1)
4564     return OverflowResult::NeverOverflows;
4565 
4566   // There are two ambiguous cases where there can be no overflow:
4567   //   SignBits == BitWidth + 1    and
4568   //   SignBits == BitWidth
4569   // The second case is difficult to check, therefore we only handle the
4570   // first case.
4571   if (SignBits == BitWidth + 1) {
4572     // It overflows only when both arguments are negative and the true
4573     // product is exactly the minimum negative number.
4574     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4575     // For simplicity we just check if at least one side is not negative.
4576     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4577                                           nullptr, UseInstrInfo);
4578     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4579                                           nullptr, UseInstrInfo);
4580     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4581       return OverflowResult::NeverOverflows;
4582   }
4583   return OverflowResult::MayOverflow;
4584 }
4585 
4586 OverflowResult llvm::computeOverflowForUnsignedAdd(
4587     const Value *LHS, const Value *RHS, const DataLayout &DL,
4588     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4589     bool UseInstrInfo) {
4590   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4591       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4592       nullptr, UseInstrInfo);
4593   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4594       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4595       nullptr, UseInstrInfo);
4596   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4597 }
4598 
4599 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4600                                                   const Value *RHS,
4601                                                   const AddOperator *Add,
4602                                                   const DataLayout &DL,
4603                                                   AssumptionCache *AC,
4604                                                   const Instruction *CxtI,
4605                                                   const DominatorTree *DT) {
4606   if (Add && Add->hasNoSignedWrap()) {
4607     return OverflowResult::NeverOverflows;
4608   }
4609 
4610   // If LHS and RHS each have at least two sign bits, the addition will look
4611   // like
4612   //
4613   // XX..... +
4614   // YY.....
4615   //
4616   // If the carry into the most significant position is 0, X and Y can't both
4617   // be 1 and therefore the carry out of the addition is also 0.
4618   //
4619   // If the carry into the most significant position is 1, X and Y can't both
4620   // be 0 and therefore the carry out of the addition is also 1.
4621   //
4622   // Since the carry into the most significant position is always equal to
4623   // the carry out of the addition, there is no signed overflow.
4624   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4625       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4626     return OverflowResult::NeverOverflows;
4627 
4628   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4629       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4630   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4631       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4632   OverflowResult OR =
4633       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4634   if (OR != OverflowResult::MayOverflow)
4635     return OR;
4636 
4637   // The remaining code needs Add to be available. Early returns if not so.
4638   if (!Add)
4639     return OverflowResult::MayOverflow;
4640 
4641   // If the sign of Add is the same as at least one of the operands, this add
4642   // CANNOT overflow. If this can be determined from the known bits of the
4643   // operands the above signedAddMayOverflow() check will have already done so.
4644   // The only other way to improve on the known bits is from an assumption, so
4645   // call computeKnownBitsFromAssume() directly.
4646   bool LHSOrRHSKnownNonNegative =
4647       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4648   bool LHSOrRHSKnownNegative =
4649       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4650   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4651     KnownBits AddKnown(LHSRange.getBitWidth());
4652     computeKnownBitsFromAssume(
4653         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4654     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4655         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4656       return OverflowResult::NeverOverflows;
4657   }
4658 
4659   return OverflowResult::MayOverflow;
4660 }
4661 
4662 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4663                                                    const Value *RHS,
4664                                                    const DataLayout &DL,
4665                                                    AssumptionCache *AC,
4666                                                    const Instruction *CxtI,
4667                                                    const DominatorTree *DT) {
4668   // Checking for conditions implied by dominating conditions may be expensive.
4669   // Limit it to usub_with_overflow calls for now.
4670   if (match(CxtI,
4671             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4672     if (auto C =
4673             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4674       if (*C)
4675         return OverflowResult::NeverOverflows;
4676       return OverflowResult::AlwaysOverflowsLow;
4677     }
4678   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4679       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4680   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4681       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4682   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4683 }
4684 
4685 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4686                                                  const Value *RHS,
4687                                                  const DataLayout &DL,
4688                                                  AssumptionCache *AC,
4689                                                  const Instruction *CxtI,
4690                                                  const DominatorTree *DT) {
4691   // If LHS and RHS each have at least two sign bits, the subtraction
4692   // cannot overflow.
4693   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4694       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4695     return OverflowResult::NeverOverflows;
4696 
4697   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4698       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4699   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4700       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4701   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4702 }
4703 
4704 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4705                                      const DominatorTree &DT) {
4706   SmallVector<const BranchInst *, 2> GuardingBranches;
4707   SmallVector<const ExtractValueInst *, 2> Results;
4708 
4709   for (const User *U : WO->users()) {
4710     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4711       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4712 
4713       if (EVI->getIndices()[0] == 0)
4714         Results.push_back(EVI);
4715       else {
4716         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4717 
4718         for (const auto *U : EVI->users())
4719           if (const auto *B = dyn_cast<BranchInst>(U)) {
4720             assert(B->isConditional() && "How else is it using an i1?");
4721             GuardingBranches.push_back(B);
4722           }
4723       }
4724     } else {
4725       // We are using the aggregate directly in a way we don't want to analyze
4726       // here (storing it to a global, say).
4727       return false;
4728     }
4729   }
4730 
4731   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4732     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4733     if (!NoWrapEdge.isSingleEdge())
4734       return false;
4735 
4736     // Check if all users of the add are provably no-wrap.
4737     for (const auto *Result : Results) {
4738       // If the extractvalue itself is not executed on overflow, the we don't
4739       // need to check each use separately, since domination is transitive.
4740       if (DT.dominates(NoWrapEdge, Result->getParent()))
4741         continue;
4742 
4743       for (auto &RU : Result->uses())
4744         if (!DT.dominates(NoWrapEdge, RU))
4745           return false;
4746     }
4747 
4748     return true;
4749   };
4750 
4751   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4752 }
4753 
4754 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4755   // See whether I has flags that may create poison
4756   if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4757     if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4758       return true;
4759   }
4760   if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4761     if (ExactOp->isExact())
4762       return true;
4763   if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4764     auto FMF = FP->getFastMathFlags();
4765     if (FMF.noNaNs() || FMF.noInfs())
4766       return true;
4767   }
4768 
4769   unsigned Opcode = Op->getOpcode();
4770 
4771   // Check whether opcode is a poison/undef-generating operation
4772   switch (Opcode) {
4773   case Instruction::Shl:
4774   case Instruction::AShr:
4775   case Instruction::LShr: {
4776     // Shifts return poison if shiftwidth is larger than the bitwidth.
4777     if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4778       SmallVector<Constant *, 4> ShiftAmounts;
4779       if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4780         unsigned NumElts = FVTy->getNumElements();
4781         for (unsigned i = 0; i < NumElts; ++i)
4782           ShiftAmounts.push_back(C->getAggregateElement(i));
4783       } else if (isa<ScalableVectorType>(C->getType()))
4784         return true; // Can't tell, just return true to be safe
4785       else
4786         ShiftAmounts.push_back(C);
4787 
4788       bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4789         auto *CI = dyn_cast_or_null<ConstantInt>(C);
4790         return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4791       });
4792       return !Safe;
4793     }
4794     return true;
4795   }
4796   case Instruction::FPToSI:
4797   case Instruction::FPToUI:
4798     // fptosi/ui yields poison if the resulting value does not fit in the
4799     // destination type.
4800     return true;
4801   case Instruction::Call:
4802     if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
4803       switch (II->getIntrinsicID()) {
4804       // TODO: Add more intrinsics.
4805       case Intrinsic::ctpop:
4806         return false;
4807       }
4808     }
4809     LLVM_FALLTHROUGH;
4810   case Instruction::CallBr:
4811   case Instruction::Invoke: {
4812     const auto *CB = cast<CallBase>(Op);
4813     return !CB->hasRetAttr(Attribute::NoUndef);
4814   }
4815   case Instruction::InsertElement:
4816   case Instruction::ExtractElement: {
4817     // If index exceeds the length of the vector, it returns poison
4818     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4819     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4820     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4821     if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
4822       return true;
4823     return false;
4824   }
4825   case Instruction::ShuffleVector: {
4826     // shufflevector may return undef.
4827     if (PoisonOnly)
4828       return false;
4829     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4830                              ? cast<ConstantExpr>(Op)->getShuffleMask()
4831                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4832     return is_contained(Mask, UndefMaskElem);
4833   }
4834   case Instruction::FNeg:
4835   case Instruction::PHI:
4836   case Instruction::Select:
4837   case Instruction::URem:
4838   case Instruction::SRem:
4839   case Instruction::ExtractValue:
4840   case Instruction::InsertValue:
4841   case Instruction::Freeze:
4842   case Instruction::ICmp:
4843   case Instruction::FCmp:
4844     return false;
4845   case Instruction::GetElementPtr: {
4846     const auto *GEP = cast<GEPOperator>(Op);
4847     return GEP->isInBounds();
4848   }
4849   default: {
4850     const auto *CE = dyn_cast<ConstantExpr>(Op);
4851     if (isa<CastInst>(Op) || (CE && CE->isCast()))
4852       return false;
4853     else if (Instruction::isBinaryOp(Opcode))
4854       return false;
4855     // Be conservative and return true.
4856     return true;
4857   }
4858   }
4859 }
4860 
4861 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
4862   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
4863 }
4864 
4865 bool llvm::canCreatePoison(const Operator *Op) {
4866   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
4867 }
4868 
4869 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
4870                                   const Value *V, unsigned Depth) {
4871   if (ValAssumedPoison == V)
4872     return true;
4873 
4874   const unsigned MaxDepth = 2;
4875   if (Depth >= MaxDepth)
4876     return false;
4877 
4878   if (const auto *I = dyn_cast<Instruction>(V)) {
4879     if (propagatesPoison(cast<Operator>(I)))
4880       return any_of(I->operands(), [=](const Value *Op) {
4881         return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
4882       });
4883 
4884     // 'select ValAssumedPoison, _, _' is poison.
4885     if (const auto *SI = dyn_cast<SelectInst>(I))
4886       return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
4887                                    Depth + 1);
4888     // V  = extractvalue V0, idx
4889     // V2 = extractvalue V0, idx2
4890     // V0's elements are all poison or not. (e.g., add_with_overflow)
4891     const WithOverflowInst *II;
4892     if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
4893         match(ValAssumedPoison, m_ExtractValue(m_Specific(II))))
4894       return true;
4895   }
4896   return false;
4897 }
4898 
4899 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
4900                           unsigned Depth) {
4901   if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
4902     return true;
4903 
4904   if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
4905     return true;
4906 
4907   const unsigned MaxDepth = 2;
4908   if (Depth >= MaxDepth)
4909     return false;
4910 
4911   const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
4912   if (I && !canCreatePoison(cast<Operator>(I))) {
4913     return all_of(I->operands(), [=](const Value *Op) {
4914       return impliesPoison(Op, V, Depth + 1);
4915     });
4916   }
4917   return false;
4918 }
4919 
4920 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
4921   return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
4922 }
4923 
4924 static bool programUndefinedIfUndefOrPoison(const Value *V,
4925                                             bool PoisonOnly);
4926 
4927 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
4928                                              AssumptionCache *AC,
4929                                              const Instruction *CtxI,
4930                                              const DominatorTree *DT,
4931                                              unsigned Depth, bool PoisonOnly) {
4932   if (Depth >= MaxAnalysisRecursionDepth)
4933     return false;
4934 
4935   if (isa<MetadataAsValue>(V))
4936     return false;
4937 
4938   if (const auto *A = dyn_cast<Argument>(V)) {
4939     if (A->hasAttribute(Attribute::NoUndef))
4940       return true;
4941   }
4942 
4943   if (auto *C = dyn_cast<Constant>(V)) {
4944     if (isa<UndefValue>(C))
4945       return PoisonOnly && !isa<PoisonValue>(C);
4946 
4947     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
4948         isa<ConstantPointerNull>(C) || isa<Function>(C))
4949       return true;
4950 
4951     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
4952       return (PoisonOnly ? !C->containsPoisonElement()
4953                          : !C->containsUndefOrPoisonElement()) &&
4954              !C->containsConstantExpression();
4955   }
4956 
4957   // Strip cast operations from a pointer value.
4958   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
4959   // inbounds with zero offset. To guarantee that the result isn't poison, the
4960   // stripped pointer is checked as it has to be pointing into an allocated
4961   // object or be null `null` to ensure `inbounds` getelement pointers with a
4962   // zero offset could not produce poison.
4963   // It can strip off addrspacecast that do not change bit representation as
4964   // well. We believe that such addrspacecast is equivalent to no-op.
4965   auto *StrippedV = V->stripPointerCastsSameRepresentation();
4966   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
4967       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
4968     return true;
4969 
4970   auto OpCheck = [&](const Value *V) {
4971     return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
4972                                             PoisonOnly);
4973   };
4974 
4975   if (auto *Opr = dyn_cast<Operator>(V)) {
4976     // If the value is a freeze instruction, then it can never
4977     // be undef or poison.
4978     if (isa<FreezeInst>(V))
4979       return true;
4980 
4981     if (const auto *CB = dyn_cast<CallBase>(V)) {
4982       if (CB->hasRetAttr(Attribute::NoUndef))
4983         return true;
4984     }
4985 
4986     if (const auto *PN = dyn_cast<PHINode>(V)) {
4987       unsigned Num = PN->getNumIncomingValues();
4988       bool IsWellDefined = true;
4989       for (unsigned i = 0; i < Num; ++i) {
4990         auto *TI = PN->getIncomingBlock(i)->getTerminator();
4991         if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
4992                                               DT, Depth + 1, PoisonOnly)) {
4993           IsWellDefined = false;
4994           break;
4995         }
4996       }
4997       if (IsWellDefined)
4998         return true;
4999     } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5000       return true;
5001   }
5002 
5003   if (auto *I = dyn_cast<LoadInst>(V))
5004     if (I->getMetadata(LLVMContext::MD_noundef))
5005       return true;
5006 
5007   if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5008     return true;
5009 
5010   // CxtI may be null or a cloned instruction.
5011   if (!CtxI || !CtxI->getParent() || !DT)
5012     return false;
5013 
5014   auto *DNode = DT->getNode(CtxI->getParent());
5015   if (!DNode)
5016     // Unreachable block
5017     return false;
5018 
5019   // If V is used as a branch condition before reaching CtxI, V cannot be
5020   // undef or poison.
5021   //   br V, BB1, BB2
5022   // BB1:
5023   //   CtxI ; V cannot be undef or poison here
5024   auto *Dominator = DNode->getIDom();
5025   while (Dominator) {
5026     auto *TI = Dominator->getBlock()->getTerminator();
5027 
5028     Value *Cond = nullptr;
5029     if (auto BI = dyn_cast<BranchInst>(TI)) {
5030       if (BI->isConditional())
5031         Cond = BI->getCondition();
5032     } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
5033       Cond = SI->getCondition();
5034     }
5035 
5036     if (Cond) {
5037       if (Cond == V)
5038         return true;
5039       else if (PoisonOnly && isa<Operator>(Cond)) {
5040         // For poison, we can analyze further
5041         auto *Opr = cast<Operator>(Cond);
5042         if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5043           return true;
5044       }
5045     }
5046 
5047     Dominator = Dominator->getIDom();
5048   }
5049 
5050   SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
5051   if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
5052     return true;
5053 
5054   return false;
5055 }
5056 
5057 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5058                                             const Instruction *CtxI,
5059                                             const DominatorTree *DT,
5060                                             unsigned Depth) {
5061   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5062 }
5063 
5064 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5065                                      const Instruction *CtxI,
5066                                      const DominatorTree *DT, unsigned Depth) {
5067   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5068 }
5069 
5070 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5071                                                  const DataLayout &DL,
5072                                                  AssumptionCache *AC,
5073                                                  const Instruction *CxtI,
5074                                                  const DominatorTree *DT) {
5075   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5076                                        Add, DL, AC, CxtI, DT);
5077 }
5078 
5079 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5080                                                  const Value *RHS,
5081                                                  const DataLayout &DL,
5082                                                  AssumptionCache *AC,
5083                                                  const Instruction *CxtI,
5084                                                  const DominatorTree *DT) {
5085   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5086 }
5087 
5088 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5089   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5090   // of time because it's possible for another thread to interfere with it for an
5091   // arbitrary length of time, but programs aren't allowed to rely on that.
5092 
5093   // If there is no successor, then execution can't transfer to it.
5094   if (isa<ReturnInst>(I))
5095     return false;
5096   if (isa<UnreachableInst>(I))
5097     return false;
5098 
5099   // An instruction that returns without throwing must transfer control flow
5100   // to a successor.
5101   return !I->mayThrow() && I->willReturn();
5102 }
5103 
5104 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5105   // TODO: This is slightly conservative for invoke instruction since exiting
5106   // via an exception *is* normal control for them.
5107   for (const Instruction &I : *BB)
5108     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5109       return false;
5110   return true;
5111 }
5112 
5113 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5114                                                   const Loop *L) {
5115   // The loop header is guaranteed to be executed for every iteration.
5116   //
5117   // FIXME: Relax this constraint to cover all basic blocks that are
5118   // guaranteed to be executed at every iteration.
5119   if (I->getParent() != L->getHeader()) return false;
5120 
5121   for (const Instruction &LI : *L->getHeader()) {
5122     if (&LI == I) return true;
5123     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5124   }
5125   llvm_unreachable("Instruction not contained in its own parent basic block.");
5126 }
5127 
5128 bool llvm::propagatesPoison(const Operator *I) {
5129   switch (I->getOpcode()) {
5130   case Instruction::Freeze:
5131   case Instruction::Select:
5132   case Instruction::PHI:
5133   case Instruction::Call:
5134   case Instruction::Invoke:
5135     return false;
5136   case Instruction::ICmp:
5137   case Instruction::FCmp:
5138   case Instruction::GetElementPtr:
5139     return true;
5140   default:
5141     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5142       return true;
5143 
5144     // Be conservative and return false.
5145     return false;
5146   }
5147 }
5148 
5149 void llvm::getGuaranteedWellDefinedOps(
5150     const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5151   switch (I->getOpcode()) {
5152     case Instruction::Store:
5153       Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5154       break;
5155 
5156     case Instruction::Load:
5157       Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5158       break;
5159 
5160     // Since dereferenceable attribute imply noundef, atomic operations
5161     // also implicitly have noundef pointers too
5162     case Instruction::AtomicCmpXchg:
5163       Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5164       break;
5165 
5166     case Instruction::AtomicRMW:
5167       Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5168       break;
5169 
5170     case Instruction::Call:
5171     case Instruction::Invoke: {
5172       const CallBase *CB = cast<CallBase>(I);
5173       if (CB->isIndirectCall())
5174         Operands.insert(CB->getCalledOperand());
5175       for (unsigned i = 0; i < CB->arg_size(); ++i) {
5176         if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5177             CB->paramHasAttr(i, Attribute::Dereferenceable))
5178           Operands.insert(CB->getArgOperand(i));
5179       }
5180       break;
5181     }
5182 
5183     default:
5184       break;
5185   }
5186 }
5187 
5188 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5189                                      SmallPtrSetImpl<const Value *> &Operands) {
5190   getGuaranteedWellDefinedOps(I, Operands);
5191   switch (I->getOpcode()) {
5192   // Divisors of these operations are allowed to be partially undef.
5193   case Instruction::UDiv:
5194   case Instruction::SDiv:
5195   case Instruction::URem:
5196   case Instruction::SRem:
5197     Operands.insert(I->getOperand(1));
5198     break;
5199 
5200   default:
5201     break;
5202   }
5203 }
5204 
5205 bool llvm::mustTriggerUB(const Instruction *I,
5206                          const SmallSet<const Value *, 16>& KnownPoison) {
5207   SmallPtrSet<const Value *, 4> NonPoisonOps;
5208   getGuaranteedNonPoisonOps(I, NonPoisonOps);
5209 
5210   for (const auto *V : NonPoisonOps)
5211     if (KnownPoison.count(V))
5212       return true;
5213 
5214   return false;
5215 }
5216 
5217 static bool programUndefinedIfUndefOrPoison(const Value *V,
5218                                             bool PoisonOnly) {
5219   // We currently only look for uses of values within the same basic
5220   // block, as that makes it easier to guarantee that the uses will be
5221   // executed given that Inst is executed.
5222   //
5223   // FIXME: Expand this to consider uses beyond the same basic block. To do
5224   // this, look out for the distinction between post-dominance and strong
5225   // post-dominance.
5226   const BasicBlock *BB = nullptr;
5227   BasicBlock::const_iterator Begin;
5228   if (const auto *Inst = dyn_cast<Instruction>(V)) {
5229     BB = Inst->getParent();
5230     Begin = Inst->getIterator();
5231     Begin++;
5232   } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5233     BB = &Arg->getParent()->getEntryBlock();
5234     Begin = BB->begin();
5235   } else {
5236     return false;
5237   }
5238 
5239   BasicBlock::const_iterator End = BB->end();
5240 
5241   if (!PoisonOnly) {
5242     // Since undef does not propagate eagerly, be conservative & just check
5243     // whether a value is directly passed to an instruction that must take
5244     // well-defined operands.
5245 
5246     for (auto &I : make_range(Begin, End)) {
5247       SmallPtrSet<const Value *, 4> WellDefinedOps;
5248       getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5249       for (auto *Op : WellDefinedOps) {
5250         if (Op == V)
5251           return true;
5252       }
5253       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5254         break;
5255     }
5256     return false;
5257   }
5258 
5259   // Set of instructions that we have proved will yield poison if Inst
5260   // does.
5261   SmallSet<const Value *, 16> YieldsPoison;
5262   SmallSet<const BasicBlock *, 4> Visited;
5263 
5264   YieldsPoison.insert(V);
5265   auto Propagate = [&](const User *User) {
5266     if (propagatesPoison(cast<Operator>(User)))
5267       YieldsPoison.insert(User);
5268   };
5269   for_each(V->users(), Propagate);
5270   Visited.insert(BB);
5271 
5272   unsigned Iter = 0;
5273   while (Iter++ < MaxAnalysisRecursionDepth) {
5274     for (auto &I : make_range(Begin, End)) {
5275       if (mustTriggerUB(&I, YieldsPoison))
5276         return true;
5277       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5278         return false;
5279 
5280       // Mark poison that propagates from I through uses of I.
5281       if (YieldsPoison.count(&I))
5282         for_each(I.users(), Propagate);
5283     }
5284 
5285     if (auto *NextBB = BB->getSingleSuccessor()) {
5286       if (Visited.insert(NextBB).second) {
5287         BB = NextBB;
5288         Begin = BB->getFirstNonPHI()->getIterator();
5289         End = BB->end();
5290         continue;
5291       }
5292     }
5293 
5294     break;
5295   }
5296   return false;
5297 }
5298 
5299 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5300   return ::programUndefinedIfUndefOrPoison(Inst, false);
5301 }
5302 
5303 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5304   return ::programUndefinedIfUndefOrPoison(Inst, true);
5305 }
5306 
5307 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5308   if (FMF.noNaNs())
5309     return true;
5310 
5311   if (auto *C = dyn_cast<ConstantFP>(V))
5312     return !C->isNaN();
5313 
5314   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5315     if (!C->getElementType()->isFloatingPointTy())
5316       return false;
5317     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5318       if (C->getElementAsAPFloat(I).isNaN())
5319         return false;
5320     }
5321     return true;
5322   }
5323 
5324   if (isa<ConstantAggregateZero>(V))
5325     return true;
5326 
5327   return false;
5328 }
5329 
5330 static bool isKnownNonZero(const Value *V) {
5331   if (auto *C = dyn_cast<ConstantFP>(V))
5332     return !C->isZero();
5333 
5334   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5335     if (!C->getElementType()->isFloatingPointTy())
5336       return false;
5337     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5338       if (C->getElementAsAPFloat(I).isZero())
5339         return false;
5340     }
5341     return true;
5342   }
5343 
5344   return false;
5345 }
5346 
5347 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5348 /// Given non-min/max outer cmp/select from the clamp pattern this
5349 /// function recognizes if it can be substitued by a "canonical" min/max
5350 /// pattern.
5351 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5352                                                Value *CmpLHS, Value *CmpRHS,
5353                                                Value *TrueVal, Value *FalseVal,
5354                                                Value *&LHS, Value *&RHS) {
5355   // Try to match
5356   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5357   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5358   // and return description of the outer Max/Min.
5359 
5360   // First, check if select has inverse order:
5361   if (CmpRHS == FalseVal) {
5362     std::swap(TrueVal, FalseVal);
5363     Pred = CmpInst::getInversePredicate(Pred);
5364   }
5365 
5366   // Assume success now. If there's no match, callers should not use these anyway.
5367   LHS = TrueVal;
5368   RHS = FalseVal;
5369 
5370   const APFloat *FC1;
5371   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5372     return {SPF_UNKNOWN, SPNB_NA, false};
5373 
5374   const APFloat *FC2;
5375   switch (Pred) {
5376   case CmpInst::FCMP_OLT:
5377   case CmpInst::FCMP_OLE:
5378   case CmpInst::FCMP_ULT:
5379   case CmpInst::FCMP_ULE:
5380     if (match(FalseVal,
5381               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5382                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5383         *FC1 < *FC2)
5384       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5385     break;
5386   case CmpInst::FCMP_OGT:
5387   case CmpInst::FCMP_OGE:
5388   case CmpInst::FCMP_UGT:
5389   case CmpInst::FCMP_UGE:
5390     if (match(FalseVal,
5391               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5392                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5393         *FC1 > *FC2)
5394       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5395     break;
5396   default:
5397     break;
5398   }
5399 
5400   return {SPF_UNKNOWN, SPNB_NA, false};
5401 }
5402 
5403 /// Recognize variations of:
5404 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5405 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5406                                       Value *CmpLHS, Value *CmpRHS,
5407                                       Value *TrueVal, Value *FalseVal) {
5408   // Swap the select operands and predicate to match the patterns below.
5409   if (CmpRHS != TrueVal) {
5410     Pred = ICmpInst::getSwappedPredicate(Pred);
5411     std::swap(TrueVal, FalseVal);
5412   }
5413   const APInt *C1;
5414   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5415     const APInt *C2;
5416     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5417     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5418         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5419       return {SPF_SMAX, SPNB_NA, false};
5420 
5421     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5422     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5423         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5424       return {SPF_SMIN, SPNB_NA, false};
5425 
5426     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5427     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5428         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5429       return {SPF_UMAX, SPNB_NA, false};
5430 
5431     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5432     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5433         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5434       return {SPF_UMIN, SPNB_NA, false};
5435   }
5436   return {SPF_UNKNOWN, SPNB_NA, false};
5437 }
5438 
5439 /// Recognize variations of:
5440 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5441 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5442                                                Value *CmpLHS, Value *CmpRHS,
5443                                                Value *TVal, Value *FVal,
5444                                                unsigned Depth) {
5445   // TODO: Allow FP min/max with nnan/nsz.
5446   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5447 
5448   Value *A = nullptr, *B = nullptr;
5449   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5450   if (!SelectPatternResult::isMinOrMax(L.Flavor))
5451     return {SPF_UNKNOWN, SPNB_NA, false};
5452 
5453   Value *C = nullptr, *D = nullptr;
5454   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5455   if (L.Flavor != R.Flavor)
5456     return {SPF_UNKNOWN, SPNB_NA, false};
5457 
5458   // We have something like: x Pred y ? min(a, b) : min(c, d).
5459   // Try to match the compare to the min/max operations of the select operands.
5460   // First, make sure we have the right compare predicate.
5461   switch (L.Flavor) {
5462   case SPF_SMIN:
5463     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5464       Pred = ICmpInst::getSwappedPredicate(Pred);
5465       std::swap(CmpLHS, CmpRHS);
5466     }
5467     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5468       break;
5469     return {SPF_UNKNOWN, SPNB_NA, false};
5470   case SPF_SMAX:
5471     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5472       Pred = ICmpInst::getSwappedPredicate(Pred);
5473       std::swap(CmpLHS, CmpRHS);
5474     }
5475     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5476       break;
5477     return {SPF_UNKNOWN, SPNB_NA, false};
5478   case SPF_UMIN:
5479     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5480       Pred = ICmpInst::getSwappedPredicate(Pred);
5481       std::swap(CmpLHS, CmpRHS);
5482     }
5483     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5484       break;
5485     return {SPF_UNKNOWN, SPNB_NA, false};
5486   case SPF_UMAX:
5487     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5488       Pred = ICmpInst::getSwappedPredicate(Pred);
5489       std::swap(CmpLHS, CmpRHS);
5490     }
5491     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5492       break;
5493     return {SPF_UNKNOWN, SPNB_NA, false};
5494   default:
5495     return {SPF_UNKNOWN, SPNB_NA, false};
5496   }
5497 
5498   // If there is a common operand in the already matched min/max and the other
5499   // min/max operands match the compare operands (either directly or inverted),
5500   // then this is min/max of the same flavor.
5501 
5502   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5503   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5504   if (D == B) {
5505     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5506                                          match(A, m_Not(m_Specific(CmpRHS)))))
5507       return {L.Flavor, SPNB_NA, false};
5508   }
5509   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5510   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5511   if (C == B) {
5512     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5513                                          match(A, m_Not(m_Specific(CmpRHS)))))
5514       return {L.Flavor, SPNB_NA, false};
5515   }
5516   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5517   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5518   if (D == A) {
5519     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5520                                          match(B, m_Not(m_Specific(CmpRHS)))))
5521       return {L.Flavor, SPNB_NA, false};
5522   }
5523   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5524   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5525   if (C == A) {
5526     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5527                                          match(B, m_Not(m_Specific(CmpRHS)))))
5528       return {L.Flavor, SPNB_NA, false};
5529   }
5530 
5531   return {SPF_UNKNOWN, SPNB_NA, false};
5532 }
5533 
5534 /// If the input value is the result of a 'not' op, constant integer, or vector
5535 /// splat of a constant integer, return the bitwise-not source value.
5536 /// TODO: This could be extended to handle non-splat vector integer constants.
5537 static Value *getNotValue(Value *V) {
5538   Value *NotV;
5539   if (match(V, m_Not(m_Value(NotV))))
5540     return NotV;
5541 
5542   const APInt *C;
5543   if (match(V, m_APInt(C)))
5544     return ConstantInt::get(V->getType(), ~(*C));
5545 
5546   return nullptr;
5547 }
5548 
5549 /// Match non-obvious integer minimum and maximum sequences.
5550 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5551                                        Value *CmpLHS, Value *CmpRHS,
5552                                        Value *TrueVal, Value *FalseVal,
5553                                        Value *&LHS, Value *&RHS,
5554                                        unsigned Depth) {
5555   // Assume success. If there's no match, callers should not use these anyway.
5556   LHS = TrueVal;
5557   RHS = FalseVal;
5558 
5559   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5560   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5561     return SPR;
5562 
5563   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5564   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5565     return SPR;
5566 
5567   // Look through 'not' ops to find disguised min/max.
5568   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5569   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5570   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5571     switch (Pred) {
5572     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5573     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5574     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5575     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5576     default: break;
5577     }
5578   }
5579 
5580   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5581   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5582   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5583     switch (Pred) {
5584     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5585     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5586     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5587     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5588     default: break;
5589     }
5590   }
5591 
5592   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5593     return {SPF_UNKNOWN, SPNB_NA, false};
5594 
5595   // Z = X -nsw Y
5596   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5597   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5598   if (match(TrueVal, m_Zero()) &&
5599       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5600     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5601 
5602   // Z = X -nsw Y
5603   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5604   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5605   if (match(FalseVal, m_Zero()) &&
5606       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5607     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5608 
5609   const APInt *C1;
5610   if (!match(CmpRHS, m_APInt(C1)))
5611     return {SPF_UNKNOWN, SPNB_NA, false};
5612 
5613   // An unsigned min/max can be written with a signed compare.
5614   const APInt *C2;
5615   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5616       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5617     // Is the sign bit set?
5618     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5619     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5620     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5621         C2->isMaxSignedValue())
5622       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5623 
5624     // Is the sign bit clear?
5625     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5626     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5627     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5628         C2->isMinSignedValue())
5629       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5630   }
5631 
5632   return {SPF_UNKNOWN, SPNB_NA, false};
5633 }
5634 
5635 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5636   assert(X && Y && "Invalid operand");
5637 
5638   // X = sub (0, Y) || X = sub nsw (0, Y)
5639   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5640       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5641     return true;
5642 
5643   // Y = sub (0, X) || Y = sub nsw (0, X)
5644   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5645       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5646     return true;
5647 
5648   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5649   Value *A, *B;
5650   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5651                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5652          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5653                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5654 }
5655 
5656 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5657                                               FastMathFlags FMF,
5658                                               Value *CmpLHS, Value *CmpRHS,
5659                                               Value *TrueVal, Value *FalseVal,
5660                                               Value *&LHS, Value *&RHS,
5661                                               unsigned Depth) {
5662   if (CmpInst::isFPPredicate(Pred)) {
5663     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5664     // 0.0 operand, set the compare's 0.0 operands to that same value for the
5665     // purpose of identifying min/max. Disregard vector constants with undefined
5666     // elements because those can not be back-propagated for analysis.
5667     Value *OutputZeroVal = nullptr;
5668     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5669         !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
5670       OutputZeroVal = TrueVal;
5671     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5672              !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
5673       OutputZeroVal = FalseVal;
5674 
5675     if (OutputZeroVal) {
5676       if (match(CmpLHS, m_AnyZeroFP()))
5677         CmpLHS = OutputZeroVal;
5678       if (match(CmpRHS, m_AnyZeroFP()))
5679         CmpRHS = OutputZeroVal;
5680     }
5681   }
5682 
5683   LHS = CmpLHS;
5684   RHS = CmpRHS;
5685 
5686   // Signed zero may return inconsistent results between implementations.
5687   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5688   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5689   // Therefore, we behave conservatively and only proceed if at least one of the
5690   // operands is known to not be zero or if we don't care about signed zero.
5691   switch (Pred) {
5692   default: break;
5693   // FIXME: Include OGT/OLT/UGT/ULT.
5694   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5695   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5696     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5697         !isKnownNonZero(CmpRHS))
5698       return {SPF_UNKNOWN, SPNB_NA, false};
5699   }
5700 
5701   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5702   bool Ordered = false;
5703 
5704   // When given one NaN and one non-NaN input:
5705   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5706   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5707   //     ordered comparison fails), which could be NaN or non-NaN.
5708   // so here we discover exactly what NaN behavior is required/accepted.
5709   if (CmpInst::isFPPredicate(Pred)) {
5710     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5711     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5712 
5713     if (LHSSafe && RHSSafe) {
5714       // Both operands are known non-NaN.
5715       NaNBehavior = SPNB_RETURNS_ANY;
5716     } else if (CmpInst::isOrdered(Pred)) {
5717       // An ordered comparison will return false when given a NaN, so it
5718       // returns the RHS.
5719       Ordered = true;
5720       if (LHSSafe)
5721         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5722         NaNBehavior = SPNB_RETURNS_NAN;
5723       else if (RHSSafe)
5724         NaNBehavior = SPNB_RETURNS_OTHER;
5725       else
5726         // Completely unsafe.
5727         return {SPF_UNKNOWN, SPNB_NA, false};
5728     } else {
5729       Ordered = false;
5730       // An unordered comparison will return true when given a NaN, so it
5731       // returns the LHS.
5732       if (LHSSafe)
5733         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5734         NaNBehavior = SPNB_RETURNS_OTHER;
5735       else if (RHSSafe)
5736         NaNBehavior = SPNB_RETURNS_NAN;
5737       else
5738         // Completely unsafe.
5739         return {SPF_UNKNOWN, SPNB_NA, false};
5740     }
5741   }
5742 
5743   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5744     std::swap(CmpLHS, CmpRHS);
5745     Pred = CmpInst::getSwappedPredicate(Pred);
5746     if (NaNBehavior == SPNB_RETURNS_NAN)
5747       NaNBehavior = SPNB_RETURNS_OTHER;
5748     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5749       NaNBehavior = SPNB_RETURNS_NAN;
5750     Ordered = !Ordered;
5751   }
5752 
5753   // ([if]cmp X, Y) ? X : Y
5754   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5755     switch (Pred) {
5756     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5757     case ICmpInst::ICMP_UGT:
5758     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5759     case ICmpInst::ICMP_SGT:
5760     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5761     case ICmpInst::ICMP_ULT:
5762     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5763     case ICmpInst::ICMP_SLT:
5764     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5765     case FCmpInst::FCMP_UGT:
5766     case FCmpInst::FCMP_UGE:
5767     case FCmpInst::FCMP_OGT:
5768     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5769     case FCmpInst::FCMP_ULT:
5770     case FCmpInst::FCMP_ULE:
5771     case FCmpInst::FCMP_OLT:
5772     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5773     }
5774   }
5775 
5776   if (isKnownNegation(TrueVal, FalseVal)) {
5777     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5778     // match against either LHS or sext(LHS).
5779     auto MaybeSExtCmpLHS =
5780         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5781     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5782     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5783     if (match(TrueVal, MaybeSExtCmpLHS)) {
5784       // Set the return values. If the compare uses the negated value (-X >s 0),
5785       // swap the return values because the negated value is always 'RHS'.
5786       LHS = TrueVal;
5787       RHS = FalseVal;
5788       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5789         std::swap(LHS, RHS);
5790 
5791       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5792       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5793       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5794         return {SPF_ABS, SPNB_NA, false};
5795 
5796       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5797       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5798         return {SPF_ABS, SPNB_NA, false};
5799 
5800       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5801       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5802       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5803         return {SPF_NABS, SPNB_NA, false};
5804     }
5805     else if (match(FalseVal, MaybeSExtCmpLHS)) {
5806       // Set the return values. If the compare uses the negated value (-X >s 0),
5807       // swap the return values because the negated value is always 'RHS'.
5808       LHS = FalseVal;
5809       RHS = TrueVal;
5810       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5811         std::swap(LHS, RHS);
5812 
5813       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5814       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5815       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5816         return {SPF_NABS, SPNB_NA, false};
5817 
5818       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5819       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5820       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5821         return {SPF_ABS, SPNB_NA, false};
5822     }
5823   }
5824 
5825   if (CmpInst::isIntPredicate(Pred))
5826     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5827 
5828   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5829   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5830   // semantics than minNum. Be conservative in such case.
5831   if (NaNBehavior != SPNB_RETURNS_ANY ||
5832       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5833        !isKnownNonZero(CmpRHS)))
5834     return {SPF_UNKNOWN, SPNB_NA, false};
5835 
5836   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5837 }
5838 
5839 /// Helps to match a select pattern in case of a type mismatch.
5840 ///
5841 /// The function processes the case when type of true and false values of a
5842 /// select instruction differs from type of the cmp instruction operands because
5843 /// of a cast instruction. The function checks if it is legal to move the cast
5844 /// operation after "select". If yes, it returns the new second value of
5845 /// "select" (with the assumption that cast is moved):
5846 /// 1. As operand of cast instruction when both values of "select" are same cast
5847 /// instructions.
5848 /// 2. As restored constant (by applying reverse cast operation) when the first
5849 /// value of the "select" is a cast operation and the second value is a
5850 /// constant.
5851 /// NOTE: We return only the new second value because the first value could be
5852 /// accessed as operand of cast instruction.
5853 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5854                               Instruction::CastOps *CastOp) {
5855   auto *Cast1 = dyn_cast<CastInst>(V1);
5856   if (!Cast1)
5857     return nullptr;
5858 
5859   *CastOp = Cast1->getOpcode();
5860   Type *SrcTy = Cast1->getSrcTy();
5861   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5862     // If V1 and V2 are both the same cast from the same type, look through V1.
5863     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5864       return Cast2->getOperand(0);
5865     return nullptr;
5866   }
5867 
5868   auto *C = dyn_cast<Constant>(V2);
5869   if (!C)
5870     return nullptr;
5871 
5872   Constant *CastedTo = nullptr;
5873   switch (*CastOp) {
5874   case Instruction::ZExt:
5875     if (CmpI->isUnsigned())
5876       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5877     break;
5878   case Instruction::SExt:
5879     if (CmpI->isSigned())
5880       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5881     break;
5882   case Instruction::Trunc:
5883     Constant *CmpConst;
5884     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5885         CmpConst->getType() == SrcTy) {
5886       // Here we have the following case:
5887       //
5888       //   %cond = cmp iN %x, CmpConst
5889       //   %tr = trunc iN %x to iK
5890       //   %narrowsel = select i1 %cond, iK %t, iK C
5891       //
5892       // We can always move trunc after select operation:
5893       //
5894       //   %cond = cmp iN %x, CmpConst
5895       //   %widesel = select i1 %cond, iN %x, iN CmpConst
5896       //   %tr = trunc iN %widesel to iK
5897       //
5898       // Note that C could be extended in any way because we don't care about
5899       // upper bits after truncation. It can't be abs pattern, because it would
5900       // look like:
5901       //
5902       //   select i1 %cond, x, -x.
5903       //
5904       // So only min/max pattern could be matched. Such match requires widened C
5905       // == CmpConst. That is why set widened C = CmpConst, condition trunc
5906       // CmpConst == C is checked below.
5907       CastedTo = CmpConst;
5908     } else {
5909       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5910     }
5911     break;
5912   case Instruction::FPTrunc:
5913     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5914     break;
5915   case Instruction::FPExt:
5916     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5917     break;
5918   case Instruction::FPToUI:
5919     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5920     break;
5921   case Instruction::FPToSI:
5922     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5923     break;
5924   case Instruction::UIToFP:
5925     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5926     break;
5927   case Instruction::SIToFP:
5928     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5929     break;
5930   default:
5931     break;
5932   }
5933 
5934   if (!CastedTo)
5935     return nullptr;
5936 
5937   // Make sure the cast doesn't lose any information.
5938   Constant *CastedBack =
5939       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5940   if (CastedBack != C)
5941     return nullptr;
5942 
5943   return CastedTo;
5944 }
5945 
5946 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5947                                              Instruction::CastOps *CastOp,
5948                                              unsigned Depth) {
5949   if (Depth >= MaxAnalysisRecursionDepth)
5950     return {SPF_UNKNOWN, SPNB_NA, false};
5951 
5952   SelectInst *SI = dyn_cast<SelectInst>(V);
5953   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5954 
5955   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5956   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5957 
5958   Value *TrueVal = SI->getTrueValue();
5959   Value *FalseVal = SI->getFalseValue();
5960 
5961   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5962                                             CastOp, Depth);
5963 }
5964 
5965 SelectPatternResult llvm::matchDecomposedSelectPattern(
5966     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5967     Instruction::CastOps *CastOp, unsigned Depth) {
5968   CmpInst::Predicate Pred = CmpI->getPredicate();
5969   Value *CmpLHS = CmpI->getOperand(0);
5970   Value *CmpRHS = CmpI->getOperand(1);
5971   FastMathFlags FMF;
5972   if (isa<FPMathOperator>(CmpI))
5973     FMF = CmpI->getFastMathFlags();
5974 
5975   // Bail out early.
5976   if (CmpI->isEquality())
5977     return {SPF_UNKNOWN, SPNB_NA, false};
5978 
5979   // Deal with type mismatches.
5980   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5981     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5982       // If this is a potential fmin/fmax with a cast to integer, then ignore
5983       // -0.0 because there is no corresponding integer value.
5984       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5985         FMF.setNoSignedZeros();
5986       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5987                                   cast<CastInst>(TrueVal)->getOperand(0), C,
5988                                   LHS, RHS, Depth);
5989     }
5990     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5991       // If this is a potential fmin/fmax with a cast to integer, then ignore
5992       // -0.0 because there is no corresponding integer value.
5993       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5994         FMF.setNoSignedZeros();
5995       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5996                                   C, cast<CastInst>(FalseVal)->getOperand(0),
5997                                   LHS, RHS, Depth);
5998     }
5999   }
6000   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6001                               LHS, RHS, Depth);
6002 }
6003 
6004 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6005   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6006   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6007   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6008   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6009   if (SPF == SPF_FMINNUM)
6010     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6011   if (SPF == SPF_FMAXNUM)
6012     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6013   llvm_unreachable("unhandled!");
6014 }
6015 
6016 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6017   if (SPF == SPF_SMIN) return SPF_SMAX;
6018   if (SPF == SPF_UMIN) return SPF_UMAX;
6019   if (SPF == SPF_SMAX) return SPF_SMIN;
6020   if (SPF == SPF_UMAX) return SPF_UMIN;
6021   llvm_unreachable("unhandled!");
6022 }
6023 
6024 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6025   switch (MinMaxID) {
6026   case Intrinsic::smax: return Intrinsic::smin;
6027   case Intrinsic::smin: return Intrinsic::smax;
6028   case Intrinsic::umax: return Intrinsic::umin;
6029   case Intrinsic::umin: return Intrinsic::umax;
6030   default: llvm_unreachable("Unexpected intrinsic");
6031   }
6032 }
6033 
6034 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6035   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6036 }
6037 
6038 std::pair<Intrinsic::ID, bool>
6039 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6040   // Check if VL contains select instructions that can be folded into a min/max
6041   // vector intrinsic and return the intrinsic if it is possible.
6042   // TODO: Support floating point min/max.
6043   bool AllCmpSingleUse = true;
6044   SelectPatternResult SelectPattern;
6045   SelectPattern.Flavor = SPF_UNKNOWN;
6046   if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6047         Value *LHS, *RHS;
6048         auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6049         if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6050             CurrentPattern.Flavor == SPF_FMINNUM ||
6051             CurrentPattern.Flavor == SPF_FMAXNUM ||
6052             !I->getType()->isIntOrIntVectorTy())
6053           return false;
6054         if (SelectPattern.Flavor != SPF_UNKNOWN &&
6055             SelectPattern.Flavor != CurrentPattern.Flavor)
6056           return false;
6057         SelectPattern = CurrentPattern;
6058         AllCmpSingleUse &=
6059             match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6060         return true;
6061       })) {
6062     switch (SelectPattern.Flavor) {
6063     case SPF_SMIN:
6064       return {Intrinsic::smin, AllCmpSingleUse};
6065     case SPF_UMIN:
6066       return {Intrinsic::umin, AllCmpSingleUse};
6067     case SPF_SMAX:
6068       return {Intrinsic::smax, AllCmpSingleUse};
6069     case SPF_UMAX:
6070       return {Intrinsic::umax, AllCmpSingleUse};
6071     default:
6072       llvm_unreachable("unexpected select pattern flavor");
6073     }
6074   }
6075   return {Intrinsic::not_intrinsic, false};
6076 }
6077 
6078 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6079                                  Value *&Start, Value *&Step) {
6080   // Handle the case of a simple two-predecessor recurrence PHI.
6081   // There's a lot more that could theoretically be done here, but
6082   // this is sufficient to catch some interesting cases.
6083   if (P->getNumIncomingValues() != 2)
6084     return false;
6085 
6086   for (unsigned i = 0; i != 2; ++i) {
6087     Value *L = P->getIncomingValue(i);
6088     Value *R = P->getIncomingValue(!i);
6089     Operator *LU = dyn_cast<Operator>(L);
6090     if (!LU)
6091       continue;
6092     unsigned Opcode = LU->getOpcode();
6093 
6094     switch (Opcode) {
6095     default:
6096       continue;
6097     // TODO: Expand list -- xor, div, gep, uaddo, etc..
6098     case Instruction::LShr:
6099     case Instruction::AShr:
6100     case Instruction::Shl:
6101     case Instruction::Add:
6102     case Instruction::Sub:
6103     case Instruction::And:
6104     case Instruction::Or:
6105     case Instruction::Mul: {
6106       Value *LL = LU->getOperand(0);
6107       Value *LR = LU->getOperand(1);
6108       // Find a recurrence.
6109       if (LL == P)
6110         L = LR;
6111       else if (LR == P)
6112         L = LL;
6113       else
6114         continue; // Check for recurrence with L and R flipped.
6115 
6116       break; // Match!
6117     }
6118     };
6119 
6120     // We have matched a recurrence of the form:
6121     //   %iv = [R, %entry], [%iv.next, %backedge]
6122     //   %iv.next = binop %iv, L
6123     // OR
6124     //   %iv = [R, %entry], [%iv.next, %backedge]
6125     //   %iv.next = binop L, %iv
6126     BO = cast<BinaryOperator>(LU);
6127     Start = R;
6128     Step = L;
6129     return true;
6130   }
6131   return false;
6132 }
6133 
6134 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6135                                  Value *&Start, Value *&Step) {
6136   BinaryOperator *BO = nullptr;
6137   P = dyn_cast<PHINode>(I->getOperand(0));
6138   if (!P)
6139     P = dyn_cast<PHINode>(I->getOperand(1));
6140   return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6141 }
6142 
6143 /// Return true if "icmp Pred LHS RHS" is always true.
6144 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6145                             const Value *RHS, const DataLayout &DL,
6146                             unsigned Depth) {
6147   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6148   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6149     return true;
6150 
6151   switch (Pred) {
6152   default:
6153     return false;
6154 
6155   case CmpInst::ICMP_SLE: {
6156     const APInt *C;
6157 
6158     // LHS s<= LHS +_{nsw} C   if C >= 0
6159     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6160       return !C->isNegative();
6161     return false;
6162   }
6163 
6164   case CmpInst::ICMP_ULE: {
6165     const APInt *C;
6166 
6167     // LHS u<= LHS +_{nuw} C   for any C
6168     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6169       return true;
6170 
6171     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6172     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6173                                        const Value *&X,
6174                                        const APInt *&CA, const APInt *&CB) {
6175       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6176           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6177         return true;
6178 
6179       // If X & C == 0 then (X | C) == X +_{nuw} C
6180       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6181           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6182         KnownBits Known(CA->getBitWidth());
6183         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6184                          /*CxtI*/ nullptr, /*DT*/ nullptr);
6185         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6186           return true;
6187       }
6188 
6189       return false;
6190     };
6191 
6192     const Value *X;
6193     const APInt *CLHS, *CRHS;
6194     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6195       return CLHS->ule(*CRHS);
6196 
6197     return false;
6198   }
6199   }
6200 }
6201 
6202 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6203 /// ALHS ARHS" is true.  Otherwise, return None.
6204 static Optional<bool>
6205 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6206                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
6207                       const DataLayout &DL, unsigned Depth) {
6208   switch (Pred) {
6209   default:
6210     return None;
6211 
6212   case CmpInst::ICMP_SLT:
6213   case CmpInst::ICMP_SLE:
6214     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6215         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6216       return true;
6217     return None;
6218 
6219   case CmpInst::ICMP_ULT:
6220   case CmpInst::ICMP_ULE:
6221     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6222         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6223       return true;
6224     return None;
6225   }
6226 }
6227 
6228 /// Return true if the operands of the two compares match.  IsSwappedOps is true
6229 /// when the operands match, but are swapped.
6230 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6231                           const Value *BLHS, const Value *BRHS,
6232                           bool &IsSwappedOps) {
6233 
6234   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6235   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6236   return IsMatchingOps || IsSwappedOps;
6237 }
6238 
6239 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6240 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6241 /// Otherwise, return None if we can't infer anything.
6242 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6243                                                     CmpInst::Predicate BPred,
6244                                                     bool AreSwappedOps) {
6245   // Canonicalize the predicate as if the operands were not commuted.
6246   if (AreSwappedOps)
6247     BPred = ICmpInst::getSwappedPredicate(BPred);
6248 
6249   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6250     return true;
6251   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6252     return false;
6253 
6254   return None;
6255 }
6256 
6257 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6258 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6259 /// Otherwise, return None if we can't infer anything.
6260 static Optional<bool>
6261 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6262                                  const ConstantInt *C1,
6263                                  CmpInst::Predicate BPred,
6264                                  const ConstantInt *C2) {
6265   ConstantRange DomCR =
6266       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6267   ConstantRange CR =
6268       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6269   ConstantRange Intersection = DomCR.intersectWith(CR);
6270   ConstantRange Difference = DomCR.difference(CR);
6271   if (Intersection.isEmptySet())
6272     return false;
6273   if (Difference.isEmptySet())
6274     return true;
6275   return None;
6276 }
6277 
6278 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6279 /// false.  Otherwise, return None if we can't infer anything.
6280 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6281                                          CmpInst::Predicate BPred,
6282                                          const Value *BLHS, const Value *BRHS,
6283                                          const DataLayout &DL, bool LHSIsTrue,
6284                                          unsigned Depth) {
6285   Value *ALHS = LHS->getOperand(0);
6286   Value *ARHS = LHS->getOperand(1);
6287 
6288   // The rest of the logic assumes the LHS condition is true.  If that's not the
6289   // case, invert the predicate to make it so.
6290   CmpInst::Predicate APred =
6291       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6292 
6293   // Can we infer anything when the two compares have matching operands?
6294   bool AreSwappedOps;
6295   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6296     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6297             APred, BPred, AreSwappedOps))
6298       return Implication;
6299     // No amount of additional analysis will infer the second condition, so
6300     // early exit.
6301     return None;
6302   }
6303 
6304   // Can we infer anything when the LHS operands match and the RHS operands are
6305   // constants (not necessarily matching)?
6306   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6307     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6308             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6309       return Implication;
6310     // No amount of additional analysis will infer the second condition, so
6311     // early exit.
6312     return None;
6313   }
6314 
6315   if (APred == BPred)
6316     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6317   return None;
6318 }
6319 
6320 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6321 /// false.  Otherwise, return None if we can't infer anything.  We expect the
6322 /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6323 static Optional<bool>
6324 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6325                    const Value *RHSOp0, const Value *RHSOp1,
6326                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6327   // The LHS must be an 'or', 'and', or a 'select' instruction.
6328   assert((LHS->getOpcode() == Instruction::And ||
6329           LHS->getOpcode() == Instruction::Or ||
6330           LHS->getOpcode() == Instruction::Select) &&
6331          "Expected LHS to be 'and', 'or', or 'select'.");
6332 
6333   assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6334 
6335   // If the result of an 'or' is false, then we know both legs of the 'or' are
6336   // false.  Similarly, if the result of an 'and' is true, then we know both
6337   // legs of the 'and' are true.
6338   const Value *ALHS, *ARHS;
6339   if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6340       (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6341     // FIXME: Make this non-recursion.
6342     if (Optional<bool> Implication = isImpliedCondition(
6343             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6344       return Implication;
6345     if (Optional<bool> Implication = isImpliedCondition(
6346             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6347       return Implication;
6348     return None;
6349   }
6350   return None;
6351 }
6352 
6353 Optional<bool>
6354 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6355                          const Value *RHSOp0, const Value *RHSOp1,
6356                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6357   // Bail out when we hit the limit.
6358   if (Depth == MaxAnalysisRecursionDepth)
6359     return None;
6360 
6361   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6362   // example.
6363   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6364     return None;
6365 
6366   Type *OpTy = LHS->getType();
6367   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6368 
6369   // FIXME: Extending the code below to handle vectors.
6370   if (OpTy->isVectorTy())
6371     return None;
6372 
6373   assert(OpTy->isIntegerTy(1) && "implied by above");
6374 
6375   // Both LHS and RHS are icmps.
6376   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6377   if (LHSCmp)
6378     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6379                               Depth);
6380 
6381   /// The LHS should be an 'or', 'and', or a 'select' instruction.  We expect
6382   /// the RHS to be an icmp.
6383   /// FIXME: Add support for and/or/select on the RHS.
6384   if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6385     if ((LHSI->getOpcode() == Instruction::And ||
6386          LHSI->getOpcode() == Instruction::Or ||
6387          LHSI->getOpcode() == Instruction::Select))
6388       return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6389                                 Depth);
6390   }
6391   return None;
6392 }
6393 
6394 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6395                                         const DataLayout &DL, bool LHSIsTrue,
6396                                         unsigned Depth) {
6397   // LHS ==> RHS by definition
6398   if (LHS == RHS)
6399     return LHSIsTrue;
6400 
6401   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6402   if (RHSCmp)
6403     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6404                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6405                               LHSIsTrue, Depth);
6406   return None;
6407 }
6408 
6409 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6410 // condition dominating ContextI or nullptr, if no condition is found.
6411 static std::pair<Value *, bool>
6412 getDomPredecessorCondition(const Instruction *ContextI) {
6413   if (!ContextI || !ContextI->getParent())
6414     return {nullptr, false};
6415 
6416   // TODO: This is a poor/cheap way to determine dominance. Should we use a
6417   // dominator tree (eg, from a SimplifyQuery) instead?
6418   const BasicBlock *ContextBB = ContextI->getParent();
6419   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6420   if (!PredBB)
6421     return {nullptr, false};
6422 
6423   // We need a conditional branch in the predecessor.
6424   Value *PredCond;
6425   BasicBlock *TrueBB, *FalseBB;
6426   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6427     return {nullptr, false};
6428 
6429   // The branch should get simplified. Don't bother simplifying this condition.
6430   if (TrueBB == FalseBB)
6431     return {nullptr, false};
6432 
6433   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6434          "Predecessor block does not point to successor?");
6435 
6436   // Is this condition implied by the predecessor condition?
6437   return {PredCond, TrueBB == ContextBB};
6438 }
6439 
6440 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6441                                              const Instruction *ContextI,
6442                                              const DataLayout &DL) {
6443   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6444   auto PredCond = getDomPredecessorCondition(ContextI);
6445   if (PredCond.first)
6446     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6447   return None;
6448 }
6449 
6450 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6451                                              const Value *LHS, const Value *RHS,
6452                                              const Instruction *ContextI,
6453                                              const DataLayout &DL) {
6454   auto PredCond = getDomPredecessorCondition(ContextI);
6455   if (PredCond.first)
6456     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6457                               PredCond.second);
6458   return None;
6459 }
6460 
6461 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6462                               APInt &Upper, const InstrInfoQuery &IIQ) {
6463   unsigned Width = Lower.getBitWidth();
6464   const APInt *C;
6465   switch (BO.getOpcode()) {
6466   case Instruction::Add:
6467     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6468       // FIXME: If we have both nuw and nsw, we should reduce the range further.
6469       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6470         // 'add nuw x, C' produces [C, UINT_MAX].
6471         Lower = *C;
6472       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6473         if (C->isNegative()) {
6474           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6475           Lower = APInt::getSignedMinValue(Width);
6476           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6477         } else {
6478           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6479           Lower = APInt::getSignedMinValue(Width) + *C;
6480           Upper = APInt::getSignedMaxValue(Width) + 1;
6481         }
6482       }
6483     }
6484     break;
6485 
6486   case Instruction::And:
6487     if (match(BO.getOperand(1), m_APInt(C)))
6488       // 'and x, C' produces [0, C].
6489       Upper = *C + 1;
6490     break;
6491 
6492   case Instruction::Or:
6493     if (match(BO.getOperand(1), m_APInt(C)))
6494       // 'or x, C' produces [C, UINT_MAX].
6495       Lower = *C;
6496     break;
6497 
6498   case Instruction::AShr:
6499     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6500       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6501       Lower = APInt::getSignedMinValue(Width).ashr(*C);
6502       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6503     } else if (match(BO.getOperand(0), m_APInt(C))) {
6504       unsigned ShiftAmount = Width - 1;
6505       if (!C->isNullValue() && IIQ.isExact(&BO))
6506         ShiftAmount = C->countTrailingZeros();
6507       if (C->isNegative()) {
6508         // 'ashr C, x' produces [C, C >> (Width-1)]
6509         Lower = *C;
6510         Upper = C->ashr(ShiftAmount) + 1;
6511       } else {
6512         // 'ashr C, x' produces [C >> (Width-1), C]
6513         Lower = C->ashr(ShiftAmount);
6514         Upper = *C + 1;
6515       }
6516     }
6517     break;
6518 
6519   case Instruction::LShr:
6520     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6521       // 'lshr x, C' produces [0, UINT_MAX >> C].
6522       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6523     } else if (match(BO.getOperand(0), m_APInt(C))) {
6524       // 'lshr C, x' produces [C >> (Width-1), C].
6525       unsigned ShiftAmount = Width - 1;
6526       if (!C->isNullValue() && IIQ.isExact(&BO))
6527         ShiftAmount = C->countTrailingZeros();
6528       Lower = C->lshr(ShiftAmount);
6529       Upper = *C + 1;
6530     }
6531     break;
6532 
6533   case Instruction::Shl:
6534     if (match(BO.getOperand(0), m_APInt(C))) {
6535       if (IIQ.hasNoUnsignedWrap(&BO)) {
6536         // 'shl nuw C, x' produces [C, C << CLZ(C)]
6537         Lower = *C;
6538         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6539       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6540         if (C->isNegative()) {
6541           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6542           unsigned ShiftAmount = C->countLeadingOnes() - 1;
6543           Lower = C->shl(ShiftAmount);
6544           Upper = *C + 1;
6545         } else {
6546           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6547           unsigned ShiftAmount = C->countLeadingZeros() - 1;
6548           Lower = *C;
6549           Upper = C->shl(ShiftAmount) + 1;
6550         }
6551       }
6552     }
6553     break;
6554 
6555   case Instruction::SDiv:
6556     if (match(BO.getOperand(1), m_APInt(C))) {
6557       APInt IntMin = APInt::getSignedMinValue(Width);
6558       APInt IntMax = APInt::getSignedMaxValue(Width);
6559       if (C->isAllOnesValue()) {
6560         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6561         //    where C != -1 and C != 0 and C != 1
6562         Lower = IntMin + 1;
6563         Upper = IntMax + 1;
6564       } else if (C->countLeadingZeros() < Width - 1) {
6565         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6566         //    where C != -1 and C != 0 and C != 1
6567         Lower = IntMin.sdiv(*C);
6568         Upper = IntMax.sdiv(*C);
6569         if (Lower.sgt(Upper))
6570           std::swap(Lower, Upper);
6571         Upper = Upper + 1;
6572         assert(Upper != Lower && "Upper part of range has wrapped!");
6573       }
6574     } else if (match(BO.getOperand(0), m_APInt(C))) {
6575       if (C->isMinSignedValue()) {
6576         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6577         Lower = *C;
6578         Upper = Lower.lshr(1) + 1;
6579       } else {
6580         // 'sdiv C, x' produces [-|C|, |C|].
6581         Upper = C->abs() + 1;
6582         Lower = (-Upper) + 1;
6583       }
6584     }
6585     break;
6586 
6587   case Instruction::UDiv:
6588     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6589       // 'udiv x, C' produces [0, UINT_MAX / C].
6590       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6591     } else if (match(BO.getOperand(0), m_APInt(C))) {
6592       // 'udiv C, x' produces [0, C].
6593       Upper = *C + 1;
6594     }
6595     break;
6596 
6597   case Instruction::SRem:
6598     if (match(BO.getOperand(1), m_APInt(C))) {
6599       // 'srem x, C' produces (-|C|, |C|).
6600       Upper = C->abs();
6601       Lower = (-Upper) + 1;
6602     }
6603     break;
6604 
6605   case Instruction::URem:
6606     if (match(BO.getOperand(1), m_APInt(C)))
6607       // 'urem x, C' produces [0, C).
6608       Upper = *C;
6609     break;
6610 
6611   default:
6612     break;
6613   }
6614 }
6615 
6616 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6617                                   APInt &Upper) {
6618   unsigned Width = Lower.getBitWidth();
6619   const APInt *C;
6620   switch (II.getIntrinsicID()) {
6621   case Intrinsic::ctpop:
6622   case Intrinsic::ctlz:
6623   case Intrinsic::cttz:
6624     // Maximum of set/clear bits is the bit width.
6625     assert(Lower == 0 && "Expected lower bound to be zero");
6626     Upper = Width + 1;
6627     break;
6628   case Intrinsic::uadd_sat:
6629     // uadd.sat(x, C) produces [C, UINT_MAX].
6630     if (match(II.getOperand(0), m_APInt(C)) ||
6631         match(II.getOperand(1), m_APInt(C)))
6632       Lower = *C;
6633     break;
6634   case Intrinsic::sadd_sat:
6635     if (match(II.getOperand(0), m_APInt(C)) ||
6636         match(II.getOperand(1), m_APInt(C))) {
6637       if (C->isNegative()) {
6638         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6639         Lower = APInt::getSignedMinValue(Width);
6640         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6641       } else {
6642         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6643         Lower = APInt::getSignedMinValue(Width) + *C;
6644         Upper = APInt::getSignedMaxValue(Width) + 1;
6645       }
6646     }
6647     break;
6648   case Intrinsic::usub_sat:
6649     // usub.sat(C, x) produces [0, C].
6650     if (match(II.getOperand(0), m_APInt(C)))
6651       Upper = *C + 1;
6652     // usub.sat(x, C) produces [0, UINT_MAX - C].
6653     else if (match(II.getOperand(1), m_APInt(C)))
6654       Upper = APInt::getMaxValue(Width) - *C + 1;
6655     break;
6656   case Intrinsic::ssub_sat:
6657     if (match(II.getOperand(0), m_APInt(C))) {
6658       if (C->isNegative()) {
6659         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6660         Lower = APInt::getSignedMinValue(Width);
6661         Upper = *C - APInt::getSignedMinValue(Width) + 1;
6662       } else {
6663         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6664         Lower = *C - APInt::getSignedMaxValue(Width);
6665         Upper = APInt::getSignedMaxValue(Width) + 1;
6666       }
6667     } else if (match(II.getOperand(1), m_APInt(C))) {
6668       if (C->isNegative()) {
6669         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6670         Lower = APInt::getSignedMinValue(Width) - *C;
6671         Upper = APInt::getSignedMaxValue(Width) + 1;
6672       } else {
6673         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6674         Lower = APInt::getSignedMinValue(Width);
6675         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6676       }
6677     }
6678     break;
6679   case Intrinsic::umin:
6680   case Intrinsic::umax:
6681   case Intrinsic::smin:
6682   case Intrinsic::smax:
6683     if (!match(II.getOperand(0), m_APInt(C)) &&
6684         !match(II.getOperand(1), m_APInt(C)))
6685       break;
6686 
6687     switch (II.getIntrinsicID()) {
6688     case Intrinsic::umin:
6689       Upper = *C + 1;
6690       break;
6691     case Intrinsic::umax:
6692       Lower = *C;
6693       break;
6694     case Intrinsic::smin:
6695       Lower = APInt::getSignedMinValue(Width);
6696       Upper = *C + 1;
6697       break;
6698     case Intrinsic::smax:
6699       Lower = *C;
6700       Upper = APInt::getSignedMaxValue(Width) + 1;
6701       break;
6702     default:
6703       llvm_unreachable("Must be min/max intrinsic");
6704     }
6705     break;
6706   case Intrinsic::abs:
6707     // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6708     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6709     if (match(II.getOperand(1), m_One()))
6710       Upper = APInt::getSignedMaxValue(Width) + 1;
6711     else
6712       Upper = APInt::getSignedMinValue(Width) + 1;
6713     break;
6714   default:
6715     break;
6716   }
6717 }
6718 
6719 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6720                                       APInt &Upper, const InstrInfoQuery &IIQ) {
6721   const Value *LHS = nullptr, *RHS = nullptr;
6722   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6723   if (R.Flavor == SPF_UNKNOWN)
6724     return;
6725 
6726   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6727 
6728   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6729     // If the negation part of the abs (in RHS) has the NSW flag,
6730     // then the result of abs(X) is [0..SIGNED_MAX],
6731     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6732     Lower = APInt::getNullValue(BitWidth);
6733     if (match(RHS, m_Neg(m_Specific(LHS))) &&
6734         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6735       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6736     else
6737       Upper = APInt::getSignedMinValue(BitWidth) + 1;
6738     return;
6739   }
6740 
6741   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6742     // The result of -abs(X) is <= 0.
6743     Lower = APInt::getSignedMinValue(BitWidth);
6744     Upper = APInt(BitWidth, 1);
6745     return;
6746   }
6747 
6748   const APInt *C;
6749   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6750     return;
6751 
6752   switch (R.Flavor) {
6753     case SPF_UMIN:
6754       Upper = *C + 1;
6755       break;
6756     case SPF_UMAX:
6757       Lower = *C;
6758       break;
6759     case SPF_SMIN:
6760       Lower = APInt::getSignedMinValue(BitWidth);
6761       Upper = *C + 1;
6762       break;
6763     case SPF_SMAX:
6764       Lower = *C;
6765       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6766       break;
6767     default:
6768       break;
6769   }
6770 }
6771 
6772 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6773                                          AssumptionCache *AC,
6774                                          const Instruction *CtxI,
6775                                          unsigned Depth) {
6776   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6777 
6778   if (Depth == MaxAnalysisRecursionDepth)
6779     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6780 
6781   const APInt *C;
6782   if (match(V, m_APInt(C)))
6783     return ConstantRange(*C);
6784 
6785   InstrInfoQuery IIQ(UseInstrInfo);
6786   unsigned BitWidth = V->getType()->getScalarSizeInBits();
6787   APInt Lower = APInt(BitWidth, 0);
6788   APInt Upper = APInt(BitWidth, 0);
6789   if (auto *BO = dyn_cast<BinaryOperator>(V))
6790     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6791   else if (auto *II = dyn_cast<IntrinsicInst>(V))
6792     setLimitsForIntrinsic(*II, Lower, Upper);
6793   else if (auto *SI = dyn_cast<SelectInst>(V))
6794     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6795 
6796   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6797 
6798   if (auto *I = dyn_cast<Instruction>(V))
6799     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6800       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6801 
6802   if (CtxI && AC) {
6803     // Try to restrict the range based on information from assumptions.
6804     for (auto &AssumeVH : AC->assumptionsFor(V)) {
6805       if (!AssumeVH)
6806         continue;
6807       CallInst *I = cast<CallInst>(AssumeVH);
6808       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6809              "Got assumption for the wrong function!");
6810       assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6811              "must be an assume intrinsic");
6812 
6813       if (!isValidAssumeForContext(I, CtxI, nullptr))
6814         continue;
6815       Value *Arg = I->getArgOperand(0);
6816       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6817       // Currently we just use information from comparisons.
6818       if (!Cmp || Cmp->getOperand(0) != V)
6819         continue;
6820       ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6821                                                AC, I, Depth + 1);
6822       CR = CR.intersectWith(
6823           ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6824     }
6825   }
6826 
6827   return CR;
6828 }
6829 
6830 static Optional<int64_t>
6831 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6832   // Skip over the first indices.
6833   gep_type_iterator GTI = gep_type_begin(GEP);
6834   for (unsigned i = 1; i != Idx; ++i, ++GTI)
6835     /*skip along*/;
6836 
6837   // Compute the offset implied by the rest of the indices.
6838   int64_t Offset = 0;
6839   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
6840     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
6841     if (!OpC)
6842       return None;
6843     if (OpC->isZero())
6844       continue; // No offset.
6845 
6846     // Handle struct indices, which add their field offset to the pointer.
6847     if (StructType *STy = GTI.getStructTypeOrNull()) {
6848       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
6849       continue;
6850     }
6851 
6852     // Otherwise, we have a sequential type like an array or fixed-length
6853     // vector. Multiply the index by the ElementSize.
6854     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
6855     if (Size.isScalable())
6856       return None;
6857     Offset += Size.getFixedSize() * OpC->getSExtValue();
6858   }
6859 
6860   return Offset;
6861 }
6862 
6863 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
6864                                         const DataLayout &DL) {
6865   Ptr1 = Ptr1->stripPointerCasts();
6866   Ptr2 = Ptr2->stripPointerCasts();
6867 
6868   // Handle the trivial case first.
6869   if (Ptr1 == Ptr2) {
6870     return 0;
6871   }
6872 
6873   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
6874   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
6875 
6876   // If one pointer is a GEP see if the GEP is a constant offset from the base,
6877   // as in "P" and "gep P, 1".
6878   // Also do this iteratively to handle the the following case:
6879   //   Ptr_t1 = GEP Ptr1, c1
6880   //   Ptr_t2 = GEP Ptr_t1, c2
6881   //   Ptr2 = GEP Ptr_t2, c3
6882   // where we will return c1+c2+c3.
6883   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
6884   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
6885   // are the same, and return the difference between offsets.
6886   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
6887                                  const Value *Ptr) -> Optional<int64_t> {
6888     const GEPOperator *GEP_T = GEP;
6889     int64_t OffsetVal = 0;
6890     bool HasSameBase = false;
6891     while (GEP_T) {
6892       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
6893       if (!Offset)
6894         return None;
6895       OffsetVal += *Offset;
6896       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
6897       if (Op0 == Ptr) {
6898         HasSameBase = true;
6899         break;
6900       }
6901       GEP_T = dyn_cast<GEPOperator>(Op0);
6902     }
6903     if (!HasSameBase)
6904       return None;
6905     return OffsetVal;
6906   };
6907 
6908   if (GEP1) {
6909     auto Offset = getOffsetFromBase(GEP1, Ptr2);
6910     if (Offset)
6911       return -*Offset;
6912   }
6913   if (GEP2) {
6914     auto Offset = getOffsetFromBase(GEP2, Ptr1);
6915     if (Offset)
6916       return Offset;
6917   }
6918 
6919   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
6920   // base.  After that base, they may have some number of common (and
6921   // potentially variable) indices.  After that they handle some constant
6922   // offset, which determines their offset from each other.  At this point, we
6923   // handle no other case.
6924   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
6925     return None;
6926 
6927   // Skip any common indices and track the GEP types.
6928   unsigned Idx = 1;
6929   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
6930     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
6931       break;
6932 
6933   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
6934   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
6935   if (!Offset1 || !Offset2)
6936     return None;
6937   return *Offset2 - *Offset1;
6938 }
6939