1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/GuardUtils.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/Loads.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/IntrinsicsAArch64.h"
55 #include "llvm/IR/IntrinsicsX86.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/Operator.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/User.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Compiler.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/KnownBits.h"
69 #include "llvm/Support/MathExtras.h"
70 #include <algorithm>
71 #include <array>
72 #include <cassert>
73 #include <cstdint>
74 #include <iterator>
75 #include <utility>
76 
77 using namespace llvm;
78 using namespace llvm::PatternMatch;
79 
80 // Controls the number of uses of the value searched for possible
81 // dominating comparisons.
82 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83                                               cl::Hidden, cl::init(20));
84 
85 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
86 /// returns the element type's bitwidth.
87 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
88   if (unsigned BitWidth = Ty->getScalarSizeInBits())
89     return BitWidth;
90 
91   return DL.getPointerTypeSizeInBits(Ty);
92 }
93 
94 namespace {
95 
96 // Simplifying using an assume can only be done in a particular control-flow
97 // context (the context instruction provides that context). If an assume and
98 // the context instruction are not in the same block then the DT helps in
99 // figuring out if we can use it.
100 struct Query {
101   const DataLayout &DL;
102   AssumptionCache *AC;
103   const Instruction *CxtI;
104   const DominatorTree *DT;
105 
106   // Unlike the other analyses, this may be a nullptr because not all clients
107   // provide it currently.
108   OptimizationRemarkEmitter *ORE;
109 
110   /// Set of assumptions that should be excluded from further queries.
111   /// This is because of the potential for mutual recursion to cause
112   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
113   /// classic case of this is assume(x = y), which will attempt to determine
114   /// bits in x from bits in y, which will attempt to determine bits in y from
115   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
116   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
117   /// (all of which can call computeKnownBits), and so on.
118   std::array<const Value *, MaxAnalysisRecursionDepth> Excluded;
119 
120   /// If true, it is safe to use metadata during simplification.
121   InstrInfoQuery IIQ;
122 
123   unsigned NumExcluded = 0;
124 
125   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
126         const DominatorTree *DT, bool UseInstrInfo,
127         OptimizationRemarkEmitter *ORE = nullptr)
128       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
129 
130   Query(const Query &Q, const Value *NewExcl)
131       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
132         NumExcluded(Q.NumExcluded) {
133     Excluded = Q.Excluded;
134     Excluded[NumExcluded++] = NewExcl;
135     assert(NumExcluded <= Excluded.size());
136   }
137 
138   bool isExcluded(const Value *Value) const {
139     if (NumExcluded == 0)
140       return false;
141     auto End = Excluded.begin() + NumExcluded;
142     return std::find(Excluded.begin(), End, Value) != End;
143   }
144 };
145 
146 } // end anonymous namespace
147 
148 // Given the provided Value and, potentially, a context instruction, return
149 // the preferred context instruction (if any).
150 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
151   // If we've been provided with a context instruction, then use that (provided
152   // it has been inserted).
153   if (CxtI && CxtI->getParent())
154     return CxtI;
155 
156   // If the value is really an already-inserted instruction, then use that.
157   CxtI = dyn_cast<Instruction>(V);
158   if (CxtI && CxtI->getParent())
159     return CxtI;
160 
161   return nullptr;
162 }
163 
164 static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
165   // If we've been provided with a context instruction, then use that (provided
166   // it has been inserted).
167   if (CxtI && CxtI->getParent())
168     return CxtI;
169 
170   // If the value is really an already-inserted instruction, then use that.
171   CxtI = dyn_cast<Instruction>(V1);
172   if (CxtI && CxtI->getParent())
173     return CxtI;
174 
175   CxtI = dyn_cast<Instruction>(V2);
176   if (CxtI && CxtI->getParent())
177     return CxtI;
178 
179   return nullptr;
180 }
181 
182 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
183                                    const APInt &DemandedElts,
184                                    APInt &DemandedLHS, APInt &DemandedRHS) {
185   // The length of scalable vectors is unknown at compile time, thus we
186   // cannot check their values
187   if (isa<ScalableVectorType>(Shuf->getType()))
188     return false;
189 
190   int NumElts =
191       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
192   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
193   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
194   if (DemandedElts.isNullValue())
195     return true;
196   // Simple case of a shuffle with zeroinitializer.
197   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
198     DemandedLHS.setBit(0);
199     return true;
200   }
201   for (int i = 0; i != NumMaskElts; ++i) {
202     if (!DemandedElts[i])
203       continue;
204     int M = Shuf->getMaskValue(i);
205     assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
206 
207     // For undef elements, we don't know anything about the common state of
208     // the shuffle result.
209     if (M == -1)
210       return false;
211     if (M < NumElts)
212       DemandedLHS.setBit(M % NumElts);
213     else
214       DemandedRHS.setBit(M % NumElts);
215   }
216 
217   return true;
218 }
219 
220 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
221                              KnownBits &Known, unsigned Depth, const Query &Q);
222 
223 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
224                              const Query &Q) {
225   // FIXME: We currently have no way to represent the DemandedElts of a scalable
226   // vector
227   if (isa<ScalableVectorType>(V->getType())) {
228     Known.resetAll();
229     return;
230   }
231 
232   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
233   APInt DemandedElts =
234       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
235   computeKnownBits(V, DemandedElts, Known, Depth, Q);
236 }
237 
238 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
239                             const DataLayout &DL, unsigned Depth,
240                             AssumptionCache *AC, const Instruction *CxtI,
241                             const DominatorTree *DT,
242                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
243   ::computeKnownBits(V, Known, Depth,
244                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
245 }
246 
247 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
248                             KnownBits &Known, const DataLayout &DL,
249                             unsigned Depth, AssumptionCache *AC,
250                             const Instruction *CxtI, const DominatorTree *DT,
251                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
252   ::computeKnownBits(V, DemandedElts, Known, Depth,
253                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
254 }
255 
256 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
257                                   unsigned Depth, const Query &Q);
258 
259 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
260                                   const Query &Q);
261 
262 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
263                                  unsigned Depth, AssumptionCache *AC,
264                                  const Instruction *CxtI,
265                                  const DominatorTree *DT,
266                                  OptimizationRemarkEmitter *ORE,
267                                  bool UseInstrInfo) {
268   return ::computeKnownBits(
269       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
270 }
271 
272 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
273                                  const DataLayout &DL, unsigned Depth,
274                                  AssumptionCache *AC, const Instruction *CxtI,
275                                  const DominatorTree *DT,
276                                  OptimizationRemarkEmitter *ORE,
277                                  bool UseInstrInfo) {
278   return ::computeKnownBits(
279       V, DemandedElts, Depth,
280       Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
281 }
282 
283 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
284                                const DataLayout &DL, AssumptionCache *AC,
285                                const Instruction *CxtI, const DominatorTree *DT,
286                                bool UseInstrInfo) {
287   assert(LHS->getType() == RHS->getType() &&
288          "LHS and RHS should have the same type");
289   assert(LHS->getType()->isIntOrIntVectorTy() &&
290          "LHS and RHS should be integers");
291   // Look for an inverted mask: (X & ~M) op (Y & M).
292   Value *M;
293   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
294       match(RHS, m_c_And(m_Specific(M), m_Value())))
295     return true;
296   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
297       match(LHS, m_c_And(m_Specific(M), m_Value())))
298     return true;
299   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
300   KnownBits LHSKnown(IT->getBitWidth());
301   KnownBits RHSKnown(IT->getBitWidth());
302   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
303   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
304   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
305 }
306 
307 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
308   for (const User *U : CxtI->users()) {
309     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
310       if (IC->isEquality())
311         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
312           if (C->isNullValue())
313             continue;
314     return false;
315   }
316   return true;
317 }
318 
319 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
320                                    const Query &Q);
321 
322 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
323                                   bool OrZero, unsigned Depth,
324                                   AssumptionCache *AC, const Instruction *CxtI,
325                                   const DominatorTree *DT, bool UseInstrInfo) {
326   return ::isKnownToBeAPowerOfTwo(
327       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
328 }
329 
330 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
331                            unsigned Depth, const Query &Q);
332 
333 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
334 
335 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
336                           AssumptionCache *AC, const Instruction *CxtI,
337                           const DominatorTree *DT, bool UseInstrInfo) {
338   return ::isKnownNonZero(V, Depth,
339                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
340 }
341 
342 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
343                               unsigned Depth, AssumptionCache *AC,
344                               const Instruction *CxtI, const DominatorTree *DT,
345                               bool UseInstrInfo) {
346   KnownBits Known =
347       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
348   return Known.isNonNegative();
349 }
350 
351 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
352                            AssumptionCache *AC, const Instruction *CxtI,
353                            const DominatorTree *DT, bool UseInstrInfo) {
354   if (auto *CI = dyn_cast<ConstantInt>(V))
355     return CI->getValue().isStrictlyPositive();
356 
357   // TODO: We'd doing two recursive queries here.  We should factor this such
358   // that only a single query is needed.
359   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
360          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
361 }
362 
363 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
364                            AssumptionCache *AC, const Instruction *CxtI,
365                            const DominatorTree *DT, bool UseInstrInfo) {
366   KnownBits Known =
367       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
368   return Known.isNegative();
369 }
370 
371 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
372                             const Query &Q);
373 
374 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
375                            const DataLayout &DL, AssumptionCache *AC,
376                            const Instruction *CxtI, const DominatorTree *DT,
377                            bool UseInstrInfo) {
378   return ::isKnownNonEqual(V1, V2, 0,
379                            Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
380                                  UseInstrInfo, /*ORE=*/nullptr));
381 }
382 
383 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
384                               const Query &Q);
385 
386 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
387                              const DataLayout &DL, unsigned Depth,
388                              AssumptionCache *AC, const Instruction *CxtI,
389                              const DominatorTree *DT, bool UseInstrInfo) {
390   return ::MaskedValueIsZero(
391       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
392 }
393 
394 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
395                                    unsigned Depth, const Query &Q);
396 
397 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
398                                    const Query &Q) {
399   // FIXME: We currently have no way to represent the DemandedElts of a scalable
400   // vector
401   if (isa<ScalableVectorType>(V->getType()))
402     return 1;
403 
404   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
405   APInt DemandedElts =
406       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
407   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
408 }
409 
410 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
411                                   unsigned Depth, AssumptionCache *AC,
412                                   const Instruction *CxtI,
413                                   const DominatorTree *DT, bool UseInstrInfo) {
414   return ::ComputeNumSignBits(
415       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
416 }
417 
418 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
419                                    bool NSW, const APInt &DemandedElts,
420                                    KnownBits &KnownOut, KnownBits &Known2,
421                                    unsigned Depth, const Query &Q) {
422   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
423 
424   // If one operand is unknown and we have no nowrap information,
425   // the result will be unknown independently of the second operand.
426   if (KnownOut.isUnknown() && !NSW)
427     return;
428 
429   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
430   KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
431 }
432 
433 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
434                                 const APInt &DemandedElts, KnownBits &Known,
435                                 KnownBits &Known2, unsigned Depth,
436                                 const Query &Q) {
437   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
438   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
439 
440   bool isKnownNegative = false;
441   bool isKnownNonNegative = false;
442   // If the multiplication is known not to overflow, compute the sign bit.
443   if (NSW) {
444     if (Op0 == Op1) {
445       // The product of a number with itself is non-negative.
446       isKnownNonNegative = true;
447     } else {
448       bool isKnownNonNegativeOp1 = Known.isNonNegative();
449       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
450       bool isKnownNegativeOp1 = Known.isNegative();
451       bool isKnownNegativeOp0 = Known2.isNegative();
452       // The product of two numbers with the same sign is non-negative.
453       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
454                            (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
455       // The product of a negative number and a non-negative number is either
456       // negative or zero.
457       if (!isKnownNonNegative)
458         isKnownNegative =
459             (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
460              Known2.isNonZero()) ||
461             (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
462     }
463   }
464 
465   Known = KnownBits::computeForMul(Known, Known2);
466 
467   // Only make use of no-wrap flags if we failed to compute the sign bit
468   // directly.  This matters if the multiplication always overflows, in
469   // which case we prefer to follow the result of the direct computation,
470   // though as the program is invoking undefined behaviour we can choose
471   // whatever we like here.
472   if (isKnownNonNegative && !Known.isNegative())
473     Known.makeNonNegative();
474   else if (isKnownNegative && !Known.isNonNegative())
475     Known.makeNegative();
476 }
477 
478 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
479                                              KnownBits &Known) {
480   unsigned BitWidth = Known.getBitWidth();
481   unsigned NumRanges = Ranges.getNumOperands() / 2;
482   assert(NumRanges >= 1);
483 
484   Known.Zero.setAllBits();
485   Known.One.setAllBits();
486 
487   for (unsigned i = 0; i < NumRanges; ++i) {
488     ConstantInt *Lower =
489         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
490     ConstantInt *Upper =
491         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
492     ConstantRange Range(Lower->getValue(), Upper->getValue());
493 
494     // The first CommonPrefixBits of all values in Range are equal.
495     unsigned CommonPrefixBits =
496         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
497     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
498     APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
499     Known.One &= UnsignedMax & Mask;
500     Known.Zero &= ~UnsignedMax & Mask;
501   }
502 }
503 
504 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
505   SmallVector<const Value *, 16> WorkSet(1, I);
506   SmallPtrSet<const Value *, 32> Visited;
507   SmallPtrSet<const Value *, 16> EphValues;
508 
509   // The instruction defining an assumption's condition itself is always
510   // considered ephemeral to that assumption (even if it has other
511   // non-ephemeral users). See r246696's test case for an example.
512   if (is_contained(I->operands(), E))
513     return true;
514 
515   while (!WorkSet.empty()) {
516     const Value *V = WorkSet.pop_back_val();
517     if (!Visited.insert(V).second)
518       continue;
519 
520     // If all uses of this value are ephemeral, then so is this value.
521     if (llvm::all_of(V->users(), [&](const User *U) {
522                                    return EphValues.count(U);
523                                  })) {
524       if (V == E)
525         return true;
526 
527       if (V == I || isSafeToSpeculativelyExecute(V)) {
528        EphValues.insert(V);
529        if (const User *U = dyn_cast<User>(V))
530          append_range(WorkSet, U->operands());
531       }
532     }
533   }
534 
535   return false;
536 }
537 
538 // Is this an intrinsic that cannot be speculated but also cannot trap?
539 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
540   if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
541     return CI->isAssumeLikeIntrinsic();
542 
543   return false;
544 }
545 
546 bool llvm::isValidAssumeForContext(const Instruction *Inv,
547                                    const Instruction *CxtI,
548                                    const DominatorTree *DT) {
549   // There are two restrictions on the use of an assume:
550   //  1. The assume must dominate the context (or the control flow must
551   //     reach the assume whenever it reaches the context).
552   //  2. The context must not be in the assume's set of ephemeral values
553   //     (otherwise we will use the assume to prove that the condition
554   //     feeding the assume is trivially true, thus causing the removal of
555   //     the assume).
556 
557   if (Inv->getParent() == CxtI->getParent()) {
558     // If Inv and CtxI are in the same block, check if the assume (Inv) is first
559     // in the BB.
560     if (Inv->comesBefore(CxtI))
561       return true;
562 
563     // Don't let an assume affect itself - this would cause the problems
564     // `isEphemeralValueOf` is trying to prevent, and it would also make
565     // the loop below go out of bounds.
566     if (Inv == CxtI)
567       return false;
568 
569     // The context comes first, but they're both in the same block.
570     // Make sure there is nothing in between that might interrupt
571     // the control flow, not even CxtI itself.
572     // We limit the scan distance between the assume and its context instruction
573     // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
574     // it can be adjusted if needed (could be turned into a cl::opt).
575     unsigned ScanLimit = 15;
576     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
577       if (!isGuaranteedToTransferExecutionToSuccessor(&*I) || --ScanLimit == 0)
578         return false;
579 
580     return !isEphemeralValueOf(Inv, CxtI);
581   }
582 
583   // Inv and CxtI are in different blocks.
584   if (DT) {
585     if (DT->dominates(Inv, CxtI))
586       return true;
587   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
588     // We don't have a DT, but this trivially dominates.
589     return true;
590   }
591 
592   return false;
593 }
594 
595 static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
596   // v u> y implies v != 0.
597   if (Pred == ICmpInst::ICMP_UGT)
598     return true;
599 
600   // Special-case v != 0 to also handle v != null.
601   if (Pred == ICmpInst::ICMP_NE)
602     return match(RHS, m_Zero());
603 
604   // All other predicates - rely on generic ConstantRange handling.
605   const APInt *C;
606   if (!match(RHS, m_APInt(C)))
607     return false;
608 
609   ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
610   return !TrueValues.contains(APInt::getNullValue(C->getBitWidth()));
611 }
612 
613 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
614   // Use of assumptions is context-sensitive. If we don't have a context, we
615   // cannot use them!
616   if (!Q.AC || !Q.CxtI)
617     return false;
618 
619   if (Q.CxtI && V->getType()->isPointerTy()) {
620     SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
621     if (!NullPointerIsDefined(Q.CxtI->getFunction(),
622                               V->getType()->getPointerAddressSpace()))
623       AttrKinds.push_back(Attribute::Dereferenceable);
624 
625     if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
626       return true;
627   }
628 
629   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
630     if (!AssumeVH)
631       continue;
632     CallInst *I = cast<CallInst>(AssumeVH);
633     assert(I->getFunction() == Q.CxtI->getFunction() &&
634            "Got assumption for the wrong function!");
635     if (Q.isExcluded(I))
636       continue;
637 
638     // Warning: This loop can end up being somewhat performance sensitive.
639     // We're running this loop for once for each value queried resulting in a
640     // runtime of ~O(#assumes * #values).
641 
642     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
643            "must be an assume intrinsic");
644 
645     Value *RHS;
646     CmpInst::Predicate Pred;
647     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
648     if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
649       return false;
650 
651     if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
652       return true;
653   }
654 
655   return false;
656 }
657 
658 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
659                                        unsigned Depth, const Query &Q) {
660   // Use of assumptions is context-sensitive. If we don't have a context, we
661   // cannot use them!
662   if (!Q.AC || !Q.CxtI)
663     return;
664 
665   unsigned BitWidth = Known.getBitWidth();
666 
667   // Refine Known set if the pointer alignment is set by assume bundles.
668   if (V->getType()->isPointerTy()) {
669     if (RetainedKnowledge RK = getKnowledgeValidInContext(
670             V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
671       Known.Zero.setLowBits(Log2_32(RK.ArgValue));
672     }
673   }
674 
675   // Note that the patterns below need to be kept in sync with the code
676   // in AssumptionCache::updateAffectedValues.
677 
678   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
679     if (!AssumeVH)
680       continue;
681     CallInst *I = cast<CallInst>(AssumeVH);
682     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
683            "Got assumption for the wrong function!");
684     if (Q.isExcluded(I))
685       continue;
686 
687     // Warning: This loop can end up being somewhat performance sensitive.
688     // We're running this loop for once for each value queried resulting in a
689     // runtime of ~O(#assumes * #values).
690 
691     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
692            "must be an assume intrinsic");
693 
694     Value *Arg = I->getArgOperand(0);
695 
696     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
697       assert(BitWidth == 1 && "assume operand is not i1?");
698       Known.setAllOnes();
699       return;
700     }
701     if (match(Arg, m_Not(m_Specific(V))) &&
702         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
703       assert(BitWidth == 1 && "assume operand is not i1?");
704       Known.setAllZero();
705       return;
706     }
707 
708     // The remaining tests are all recursive, so bail out if we hit the limit.
709     if (Depth == MaxAnalysisRecursionDepth)
710       continue;
711 
712     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
713     if (!Cmp)
714       continue;
715 
716     // Note that ptrtoint may change the bitwidth.
717     Value *A, *B;
718     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
719 
720     CmpInst::Predicate Pred;
721     uint64_t C;
722     switch (Cmp->getPredicate()) {
723     default:
724       break;
725     case ICmpInst::ICMP_EQ:
726       // assume(v = a)
727       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
728           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
729         KnownBits RHSKnown =
730             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
731         Known.Zero |= RHSKnown.Zero;
732         Known.One  |= RHSKnown.One;
733       // assume(v & b = a)
734       } else if (match(Cmp,
735                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
736                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
737         KnownBits RHSKnown =
738             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
739         KnownBits MaskKnown =
740             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
741 
742         // For those bits in the mask that are known to be one, we can propagate
743         // known bits from the RHS to V.
744         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
745         Known.One  |= RHSKnown.One  & MaskKnown.One;
746       // assume(~(v & b) = a)
747       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
748                                      m_Value(A))) &&
749                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
750         KnownBits RHSKnown =
751             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
752         KnownBits MaskKnown =
753             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
754 
755         // For those bits in the mask that are known to be one, we can propagate
756         // inverted known bits from the RHS to V.
757         Known.Zero |= RHSKnown.One  & MaskKnown.One;
758         Known.One  |= RHSKnown.Zero & MaskKnown.One;
759       // assume(v | b = a)
760       } else if (match(Cmp,
761                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
762                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
763         KnownBits RHSKnown =
764             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
765         KnownBits BKnown =
766             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
767 
768         // For those bits in B that are known to be zero, we can propagate known
769         // bits from the RHS to V.
770         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
771         Known.One  |= RHSKnown.One  & BKnown.Zero;
772       // assume(~(v | b) = a)
773       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
774                                      m_Value(A))) &&
775                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
776         KnownBits RHSKnown =
777             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
778         KnownBits BKnown =
779             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
780 
781         // For those bits in B that are known to be zero, we can propagate
782         // inverted known bits from the RHS to V.
783         Known.Zero |= RHSKnown.One  & BKnown.Zero;
784         Known.One  |= RHSKnown.Zero & BKnown.Zero;
785       // assume(v ^ b = a)
786       } else if (match(Cmp,
787                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
788                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
789         KnownBits RHSKnown =
790             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
791         KnownBits BKnown =
792             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
793 
794         // For those bits in B that are known to be zero, we can propagate known
795         // bits from the RHS to V. For those bits in B that are known to be one,
796         // we can propagate inverted known bits from the RHS to V.
797         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
798         Known.One  |= RHSKnown.One  & BKnown.Zero;
799         Known.Zero |= RHSKnown.One  & BKnown.One;
800         Known.One  |= RHSKnown.Zero & BKnown.One;
801       // assume(~(v ^ b) = a)
802       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
803                                      m_Value(A))) &&
804                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
805         KnownBits RHSKnown =
806             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
807         KnownBits BKnown =
808             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
809 
810         // For those bits in B that are known to be zero, we can propagate
811         // inverted known bits from the RHS to V. For those bits in B that are
812         // known to be one, we can propagate known bits from the RHS to V.
813         Known.Zero |= RHSKnown.One  & BKnown.Zero;
814         Known.One  |= RHSKnown.Zero & BKnown.Zero;
815         Known.Zero |= RHSKnown.Zero & BKnown.One;
816         Known.One  |= RHSKnown.One  & BKnown.One;
817       // assume(v << c = a)
818       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
819                                      m_Value(A))) &&
820                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
821         KnownBits RHSKnown =
822             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
823 
824         // For those bits in RHS that are known, we can propagate them to known
825         // bits in V shifted to the right by C.
826         RHSKnown.Zero.lshrInPlace(C);
827         Known.Zero |= RHSKnown.Zero;
828         RHSKnown.One.lshrInPlace(C);
829         Known.One  |= RHSKnown.One;
830       // assume(~(v << c) = a)
831       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
832                                      m_Value(A))) &&
833                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
834         KnownBits RHSKnown =
835             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
836         // For those bits in RHS that are known, we can propagate them inverted
837         // to known bits in V shifted to the right by C.
838         RHSKnown.One.lshrInPlace(C);
839         Known.Zero |= RHSKnown.One;
840         RHSKnown.Zero.lshrInPlace(C);
841         Known.One  |= RHSKnown.Zero;
842       // assume(v >> c = a)
843       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
844                                      m_Value(A))) &&
845                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
846         KnownBits RHSKnown =
847             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
848         // For those bits in RHS that are known, we can propagate them to known
849         // bits in V shifted to the right by C.
850         Known.Zero |= RHSKnown.Zero << C;
851         Known.One  |= RHSKnown.One  << C;
852       // assume(~(v >> c) = a)
853       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
854                                      m_Value(A))) &&
855                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
856         KnownBits RHSKnown =
857             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
858         // For those bits in RHS that are known, we can propagate them inverted
859         // to known bits in V shifted to the right by C.
860         Known.Zero |= RHSKnown.One  << C;
861         Known.One  |= RHSKnown.Zero << C;
862       }
863       break;
864     case ICmpInst::ICMP_SGE:
865       // assume(v >=_s c) where c is non-negative
866       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
867           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
868         KnownBits RHSKnown =
869             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
870 
871         if (RHSKnown.isNonNegative()) {
872           // We know that the sign bit is zero.
873           Known.makeNonNegative();
874         }
875       }
876       break;
877     case ICmpInst::ICMP_SGT:
878       // assume(v >_s c) where c is at least -1.
879       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
880           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
881         KnownBits RHSKnown =
882             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
883 
884         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
885           // We know that the sign bit is zero.
886           Known.makeNonNegative();
887         }
888       }
889       break;
890     case ICmpInst::ICMP_SLE:
891       // assume(v <=_s c) where c is negative
892       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
893           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
894         KnownBits RHSKnown =
895             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
896 
897         if (RHSKnown.isNegative()) {
898           // We know that the sign bit is one.
899           Known.makeNegative();
900         }
901       }
902       break;
903     case ICmpInst::ICMP_SLT:
904       // assume(v <_s c) where c is non-positive
905       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
906           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
907         KnownBits RHSKnown =
908             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
909 
910         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
911           // We know that the sign bit is one.
912           Known.makeNegative();
913         }
914       }
915       break;
916     case ICmpInst::ICMP_ULE:
917       // assume(v <=_u c)
918       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
919           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
920         KnownBits RHSKnown =
921             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
922 
923         // Whatever high bits in c are zero are known to be zero.
924         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
925       }
926       break;
927     case ICmpInst::ICMP_ULT:
928       // assume(v <_u c)
929       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
930           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
931         KnownBits RHSKnown =
932             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
933 
934         // If the RHS is known zero, then this assumption must be wrong (nothing
935         // is unsigned less than zero). Signal a conflict and get out of here.
936         if (RHSKnown.isZero()) {
937           Known.Zero.setAllBits();
938           Known.One.setAllBits();
939           break;
940         }
941 
942         // Whatever high bits in c are zero are known to be zero (if c is a power
943         // of 2, then one more).
944         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
945           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
946         else
947           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
948       }
949       break;
950     }
951   }
952 
953   // If assumptions conflict with each other or previous known bits, then we
954   // have a logical fallacy. It's possible that the assumption is not reachable,
955   // so this isn't a real bug. On the other hand, the program may have undefined
956   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
957   // clear out the known bits, try to warn the user, and hope for the best.
958   if (Known.Zero.intersects(Known.One)) {
959     Known.resetAll();
960 
961     if (Q.ORE)
962       Q.ORE->emit([&]() {
963         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
964         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
965                                           CxtI)
966                << "Detected conflicting code assumptions. Program may "
967                   "have undefined behavior, or compiler may have "
968                   "internal error.";
969       });
970   }
971 }
972 
973 /// Compute known bits from a shift operator, including those with a
974 /// non-constant shift amount. Known is the output of this function. Known2 is a
975 /// pre-allocated temporary with the same bit width as Known and on return
976 /// contains the known bit of the shift value source. KF is an
977 /// operator-specific function that, given the known-bits and a shift amount,
978 /// compute the implied known-bits of the shift operator's result respectively
979 /// for that shift amount. The results from calling KF are conservatively
980 /// combined for all permitted shift amounts.
981 static void computeKnownBitsFromShiftOperator(
982     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
983     KnownBits &Known2, unsigned Depth, const Query &Q,
984     function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
985   unsigned BitWidth = Known.getBitWidth();
986   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
987   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
988 
989   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
990   // BitWidth > 64 and any upper bits are known, we'll end up returning the
991   // limit value (which implies all bits are known).
992   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
993   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
994   bool ShiftAmtIsConstant = Known.isConstant();
995   bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
996 
997   if (ShiftAmtIsConstant) {
998     Known = KF(Known2, Known);
999 
1000     // If the known bits conflict, this must be an overflowing left shift, so
1001     // the shift result is poison. We can return anything we want. Choose 0 for
1002     // the best folding opportunity.
1003     if (Known.hasConflict())
1004       Known.setAllZero();
1005 
1006     return;
1007   }
1008 
1009   // If the shift amount could be greater than or equal to the bit-width of the
1010   // LHS, the value could be poison, but bail out because the check below is
1011   // expensive.
1012   // TODO: Should we just carry on?
1013   if (MaxShiftAmtIsOutOfRange) {
1014     Known.resetAll();
1015     return;
1016   }
1017 
1018   // It would be more-clearly correct to use the two temporaries for this
1019   // calculation. Reusing the APInts here to prevent unnecessary allocations.
1020   Known.resetAll();
1021 
1022   // If we know the shifter operand is nonzero, we can sometimes infer more
1023   // known bits. However this is expensive to compute, so be lazy about it and
1024   // only compute it when absolutely necessary.
1025   Optional<bool> ShifterOperandIsNonZero;
1026 
1027   // Early exit if we can't constrain any well-defined shift amount.
1028   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1029       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1030     ShifterOperandIsNonZero =
1031         isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1032     if (!*ShifterOperandIsNonZero)
1033       return;
1034   }
1035 
1036   Known.Zero.setAllBits();
1037   Known.One.setAllBits();
1038   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1039     // Combine the shifted known input bits only for those shift amounts
1040     // compatible with its known constraints.
1041     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1042       continue;
1043     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1044       continue;
1045     // If we know the shifter is nonzero, we may be able to infer more known
1046     // bits. This check is sunk down as far as possible to avoid the expensive
1047     // call to isKnownNonZero if the cheaper checks above fail.
1048     if (ShiftAmt == 0) {
1049       if (!ShifterOperandIsNonZero.hasValue())
1050         ShifterOperandIsNonZero =
1051             isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1052       if (*ShifterOperandIsNonZero)
1053         continue;
1054     }
1055 
1056     Known = KnownBits::commonBits(
1057         Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1058   }
1059 
1060   // If the known bits conflict, the result is poison. Return a 0 and hope the
1061   // caller can further optimize that.
1062   if (Known.hasConflict())
1063     Known.setAllZero();
1064 }
1065 
1066 static void computeKnownBitsFromOperator(const Operator *I,
1067                                          const APInt &DemandedElts,
1068                                          KnownBits &Known, unsigned Depth,
1069                                          const Query &Q) {
1070   unsigned BitWidth = Known.getBitWidth();
1071 
1072   KnownBits Known2(BitWidth);
1073   switch (I->getOpcode()) {
1074   default: break;
1075   case Instruction::Load:
1076     if (MDNode *MD =
1077             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1078       computeKnownBitsFromRangeMetadata(*MD, Known);
1079     break;
1080   case Instruction::And: {
1081     // If either the LHS or the RHS are Zero, the result is zero.
1082     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1083     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1084 
1085     Known &= Known2;
1086 
1087     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1088     // here we handle the more general case of adding any odd number by
1089     // matching the form add(x, add(x, y)) where y is odd.
1090     // TODO: This could be generalized to clearing any bit set in y where the
1091     // following bit is known to be unset in y.
1092     Value *X = nullptr, *Y = nullptr;
1093     if (!Known.Zero[0] && !Known.One[0] &&
1094         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1095       Known2.resetAll();
1096       computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1097       if (Known2.countMinTrailingOnes() > 0)
1098         Known.Zero.setBit(0);
1099     }
1100     break;
1101   }
1102   case Instruction::Or:
1103     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1104     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1105 
1106     Known |= Known2;
1107     break;
1108   case Instruction::Xor:
1109     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1110     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1111 
1112     Known ^= Known2;
1113     break;
1114   case Instruction::Mul: {
1115     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1116     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1117                         Known, Known2, Depth, Q);
1118     break;
1119   }
1120   case Instruction::UDiv: {
1121     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1122     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1123     Known = KnownBits::udiv(Known, Known2);
1124     break;
1125   }
1126   case Instruction::Select: {
1127     const Value *LHS = nullptr, *RHS = nullptr;
1128     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1129     if (SelectPatternResult::isMinOrMax(SPF)) {
1130       computeKnownBits(RHS, Known, Depth + 1, Q);
1131       computeKnownBits(LHS, Known2, Depth + 1, Q);
1132       switch (SPF) {
1133       default:
1134         llvm_unreachable("Unhandled select pattern flavor!");
1135       case SPF_SMAX:
1136         Known = KnownBits::smax(Known, Known2);
1137         break;
1138       case SPF_SMIN:
1139         Known = KnownBits::smin(Known, Known2);
1140         break;
1141       case SPF_UMAX:
1142         Known = KnownBits::umax(Known, Known2);
1143         break;
1144       case SPF_UMIN:
1145         Known = KnownBits::umin(Known, Known2);
1146         break;
1147       }
1148       break;
1149     }
1150 
1151     computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1152     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1153 
1154     // Only known if known in both the LHS and RHS.
1155     Known = KnownBits::commonBits(Known, Known2);
1156 
1157     if (SPF == SPF_ABS) {
1158       // RHS from matchSelectPattern returns the negation part of abs pattern.
1159       // If the negate has an NSW flag we can assume the sign bit of the result
1160       // will be 0 because that makes abs(INT_MIN) undefined.
1161       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1162           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1163         Known.Zero.setSignBit();
1164     }
1165 
1166     break;
1167   }
1168   case Instruction::FPTrunc:
1169   case Instruction::FPExt:
1170   case Instruction::FPToUI:
1171   case Instruction::FPToSI:
1172   case Instruction::SIToFP:
1173   case Instruction::UIToFP:
1174     break; // Can't work with floating point.
1175   case Instruction::PtrToInt:
1176   case Instruction::IntToPtr:
1177     // Fall through and handle them the same as zext/trunc.
1178     LLVM_FALLTHROUGH;
1179   case Instruction::ZExt:
1180   case Instruction::Trunc: {
1181     Type *SrcTy = I->getOperand(0)->getType();
1182 
1183     unsigned SrcBitWidth;
1184     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1185     // which fall through here.
1186     Type *ScalarTy = SrcTy->getScalarType();
1187     SrcBitWidth = ScalarTy->isPointerTy() ?
1188       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1189       Q.DL.getTypeSizeInBits(ScalarTy);
1190 
1191     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1192     Known = Known.anyextOrTrunc(SrcBitWidth);
1193     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1194     Known = Known.zextOrTrunc(BitWidth);
1195     break;
1196   }
1197   case Instruction::BitCast: {
1198     Type *SrcTy = I->getOperand(0)->getType();
1199     if (SrcTy->isIntOrPtrTy() &&
1200         // TODO: For now, not handling conversions like:
1201         // (bitcast i64 %x to <2 x i32>)
1202         !I->getType()->isVectorTy()) {
1203       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1204       break;
1205     }
1206     break;
1207   }
1208   case Instruction::SExt: {
1209     // Compute the bits in the result that are not present in the input.
1210     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1211 
1212     Known = Known.trunc(SrcBitWidth);
1213     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1214     // If the sign bit of the input is known set or clear, then we know the
1215     // top bits of the result.
1216     Known = Known.sext(BitWidth);
1217     break;
1218   }
1219   case Instruction::Shl: {
1220     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1221     auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1222       KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1223       // If this shift has "nsw" keyword, then the result is either a poison
1224       // value or has the same sign bit as the first operand.
1225       if (NSW) {
1226         if (KnownVal.Zero.isSignBitSet())
1227           Result.Zero.setSignBit();
1228         if (KnownVal.One.isSignBitSet())
1229           Result.One.setSignBit();
1230       }
1231       return Result;
1232     };
1233     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1234                                       KF);
1235     // Trailing zeros of a right-shifted constant never decrease.
1236     const APInt *C;
1237     if (match(I->getOperand(0), m_APInt(C)))
1238       Known.Zero.setLowBits(C->countTrailingZeros());
1239     break;
1240   }
1241   case Instruction::LShr: {
1242     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1243       return KnownBits::lshr(KnownVal, KnownAmt);
1244     };
1245     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1246                                       KF);
1247     // Leading zeros of a left-shifted constant never decrease.
1248     const APInt *C;
1249     if (match(I->getOperand(0), m_APInt(C)))
1250       Known.Zero.setHighBits(C->countLeadingZeros());
1251     break;
1252   }
1253   case Instruction::AShr: {
1254     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1255       return KnownBits::ashr(KnownVal, KnownAmt);
1256     };
1257     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1258                                       KF);
1259     break;
1260   }
1261   case Instruction::Sub: {
1262     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1263     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1264                            DemandedElts, Known, Known2, Depth, Q);
1265     break;
1266   }
1267   case Instruction::Add: {
1268     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1269     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1270                            DemandedElts, Known, Known2, Depth, Q);
1271     break;
1272   }
1273   case Instruction::SRem:
1274     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1275     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1276     Known = KnownBits::srem(Known, Known2);
1277     break;
1278 
1279   case Instruction::URem:
1280     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1281     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1282     Known = KnownBits::urem(Known, Known2);
1283     break;
1284   case Instruction::Alloca:
1285     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1286     break;
1287   case Instruction::GetElementPtr: {
1288     // Analyze all of the subscripts of this getelementptr instruction
1289     // to determine if we can prove known low zero bits.
1290     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1291     // Accumulate the constant indices in a separate variable
1292     // to minimize the number of calls to computeForAddSub.
1293     APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1294 
1295     gep_type_iterator GTI = gep_type_begin(I);
1296     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1297       // TrailZ can only become smaller, short-circuit if we hit zero.
1298       if (Known.isUnknown())
1299         break;
1300 
1301       Value *Index = I->getOperand(i);
1302 
1303       // Handle case when index is zero.
1304       Constant *CIndex = dyn_cast<Constant>(Index);
1305       if (CIndex && CIndex->isZeroValue())
1306         continue;
1307 
1308       if (StructType *STy = GTI.getStructTypeOrNull()) {
1309         // Handle struct member offset arithmetic.
1310 
1311         assert(CIndex &&
1312                "Access to structure field must be known at compile time");
1313 
1314         if (CIndex->getType()->isVectorTy())
1315           Index = CIndex->getSplatValue();
1316 
1317         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1318         const StructLayout *SL = Q.DL.getStructLayout(STy);
1319         uint64_t Offset = SL->getElementOffset(Idx);
1320         AccConstIndices += Offset;
1321         continue;
1322       }
1323 
1324       // Handle array index arithmetic.
1325       Type *IndexedTy = GTI.getIndexedType();
1326       if (!IndexedTy->isSized()) {
1327         Known.resetAll();
1328         break;
1329       }
1330 
1331       unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1332       KnownBits IndexBits(IndexBitWidth);
1333       computeKnownBits(Index, IndexBits, Depth + 1, Q);
1334       TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1335       uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1336       KnownBits ScalingFactor(IndexBitWidth);
1337       // Multiply by current sizeof type.
1338       // &A[i] == A + i * sizeof(*A[i]).
1339       if (IndexTypeSize.isScalable()) {
1340         // For scalable types the only thing we know about sizeof is
1341         // that this is a multiple of the minimum size.
1342         ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1343       } else if (IndexBits.isConstant()) {
1344         APInt IndexConst = IndexBits.getConstant();
1345         APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1346         IndexConst *= ScalingFactor;
1347         AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1348         continue;
1349       } else {
1350         ScalingFactor =
1351             KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1352       }
1353       IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor);
1354 
1355       // If the offsets have a different width from the pointer, according
1356       // to the language reference we need to sign-extend or truncate them
1357       // to the width of the pointer.
1358       IndexBits = IndexBits.sextOrTrunc(BitWidth);
1359 
1360       // Note that inbounds does *not* guarantee nsw for the addition, as only
1361       // the offset is signed, while the base address is unsigned.
1362       Known = KnownBits::computeForAddSub(
1363           /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1364     }
1365     if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
1366       KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1367       Known = KnownBits::computeForAddSub(
1368           /*Add=*/true, /*NSW=*/false, Known, Index);
1369     }
1370     break;
1371   }
1372   case Instruction::PHI: {
1373     const PHINode *P = cast<PHINode>(I);
1374     BinaryOperator *BO = nullptr;
1375     Value *R = nullptr, *L = nullptr;
1376     if (matchSimpleRecurrence(P, BO, R, L)) {
1377       // Handle the case of a simple two-predecessor recurrence PHI.
1378       // There's a lot more that could theoretically be done here, but
1379       // this is sufficient to catch some interesting cases.
1380       unsigned Opcode = BO->getOpcode();
1381 
1382       // If this is a shift recurrence, we know the bits being shifted in.
1383       // We can combine that with information about the start value of the
1384       // recurrence to conclude facts about the result.
1385       if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1386            Opcode == Instruction::Shl) &&
1387           BO->getOperand(0) == I) {
1388 
1389         // We have matched a recurrence of the form:
1390         // %iv = [R, %entry], [%iv.next, %backedge]
1391         // %iv.next = shift_op %iv, L
1392 
1393         // Recurse with the phi context to avoid concern about whether facts
1394         // inferred hold at original context instruction.  TODO: It may be
1395         // correct to use the original context.  IF warranted, explore and
1396         // add sufficient tests to cover.
1397         Query RecQ = Q;
1398         RecQ.CxtI = P;
1399         computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1400         switch (Opcode) {
1401         case Instruction::Shl:
1402           // A shl recurrence will only increase the tailing zeros
1403           Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1404           break;
1405         case Instruction::LShr:
1406           // A lshr recurrence will preserve the leading zeros of the
1407           // start value
1408           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1409           break;
1410         case Instruction::AShr:
1411           // An ashr recurrence will extend the initial sign bit
1412           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1413           Known.One.setHighBits(Known2.countMinLeadingOnes());
1414           break;
1415         };
1416       }
1417 
1418       // Check for operations that have the property that if
1419       // both their operands have low zero bits, the result
1420       // will have low zero bits.
1421       if (Opcode == Instruction::Add ||
1422           Opcode == Instruction::Sub ||
1423           Opcode == Instruction::And ||
1424           Opcode == Instruction::Or ||
1425           Opcode == Instruction::Mul) {
1426         // Change the context instruction to the "edge" that flows into the
1427         // phi. This is important because that is where the value is actually
1428         // "evaluated" even though it is used later somewhere else. (see also
1429         // D69571).
1430         Query RecQ = Q;
1431 
1432         unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1433         Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1434         Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1435 
1436         // Ok, we have a PHI of the form L op= R. Check for low
1437         // zero bits.
1438         RecQ.CxtI = RInst;
1439         computeKnownBits(R, Known2, Depth + 1, RecQ);
1440 
1441         // We need to take the minimum number of known bits
1442         KnownBits Known3(BitWidth);
1443         RecQ.CxtI = LInst;
1444         computeKnownBits(L, Known3, Depth + 1, RecQ);
1445 
1446         Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1447                                        Known3.countMinTrailingZeros()));
1448 
1449         auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1450         if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1451           // If initial value of recurrence is nonnegative, and we are adding
1452           // a nonnegative number with nsw, the result can only be nonnegative
1453           // or poison value regardless of the number of times we execute the
1454           // add in phi recurrence. If initial value is negative and we are
1455           // adding a negative number with nsw, the result can only be
1456           // negative or poison value. Similar arguments apply to sub and mul.
1457           //
1458           // (add non-negative, non-negative) --> non-negative
1459           // (add negative, negative) --> negative
1460           if (Opcode == Instruction::Add) {
1461             if (Known2.isNonNegative() && Known3.isNonNegative())
1462               Known.makeNonNegative();
1463             else if (Known2.isNegative() && Known3.isNegative())
1464               Known.makeNegative();
1465           }
1466 
1467           // (sub nsw non-negative, negative) --> non-negative
1468           // (sub nsw negative, non-negative) --> negative
1469           else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1470             if (Known2.isNonNegative() && Known3.isNegative())
1471               Known.makeNonNegative();
1472             else if (Known2.isNegative() && Known3.isNonNegative())
1473               Known.makeNegative();
1474           }
1475 
1476           // (mul nsw non-negative, non-negative) --> non-negative
1477           else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1478                    Known3.isNonNegative())
1479             Known.makeNonNegative();
1480         }
1481 
1482         break;
1483       }
1484     }
1485 
1486     // Unreachable blocks may have zero-operand PHI nodes.
1487     if (P->getNumIncomingValues() == 0)
1488       break;
1489 
1490     // Otherwise take the unions of the known bit sets of the operands,
1491     // taking conservative care to avoid excessive recursion.
1492     if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1493       // Skip if every incoming value references to ourself.
1494       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1495         break;
1496 
1497       Known.Zero.setAllBits();
1498       Known.One.setAllBits();
1499       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1500         Value *IncValue = P->getIncomingValue(u);
1501         // Skip direct self references.
1502         if (IncValue == P) continue;
1503 
1504         // Change the context instruction to the "edge" that flows into the
1505         // phi. This is important because that is where the value is actually
1506         // "evaluated" even though it is used later somewhere else. (see also
1507         // D69571).
1508         Query RecQ = Q;
1509         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1510 
1511         Known2 = KnownBits(BitWidth);
1512         // Recurse, but cap the recursion to one level, because we don't
1513         // want to waste time spinning around in loops.
1514         computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1515         Known = KnownBits::commonBits(Known, Known2);
1516         // If all bits have been ruled out, there's no need to check
1517         // more operands.
1518         if (Known.isUnknown())
1519           break;
1520       }
1521     }
1522     break;
1523   }
1524   case Instruction::Call:
1525   case Instruction::Invoke:
1526     // If range metadata is attached to this call, set known bits from that,
1527     // and then intersect with known bits based on other properties of the
1528     // function.
1529     if (MDNode *MD =
1530             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1531       computeKnownBitsFromRangeMetadata(*MD, Known);
1532     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1533       computeKnownBits(RV, Known2, Depth + 1, Q);
1534       Known.Zero |= Known2.Zero;
1535       Known.One |= Known2.One;
1536     }
1537     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1538       switch (II->getIntrinsicID()) {
1539       default: break;
1540       case Intrinsic::abs: {
1541         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1542         bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1543         Known = Known2.abs(IntMinIsPoison);
1544         break;
1545       }
1546       case Intrinsic::bitreverse:
1547         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1548         Known.Zero |= Known2.Zero.reverseBits();
1549         Known.One |= Known2.One.reverseBits();
1550         break;
1551       case Intrinsic::bswap:
1552         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1553         Known.Zero |= Known2.Zero.byteSwap();
1554         Known.One |= Known2.One.byteSwap();
1555         break;
1556       case Intrinsic::ctlz: {
1557         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1558         // If we have a known 1, its position is our upper bound.
1559         unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1560         // If this call is undefined for 0, the result will be less than 2^n.
1561         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1562           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1563         unsigned LowBits = Log2_32(PossibleLZ)+1;
1564         Known.Zero.setBitsFrom(LowBits);
1565         break;
1566       }
1567       case Intrinsic::cttz: {
1568         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1569         // If we have a known 1, its position is our upper bound.
1570         unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1571         // If this call is undefined for 0, the result will be less than 2^n.
1572         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1573           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1574         unsigned LowBits = Log2_32(PossibleTZ)+1;
1575         Known.Zero.setBitsFrom(LowBits);
1576         break;
1577       }
1578       case Intrinsic::ctpop: {
1579         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1580         // We can bound the space the count needs.  Also, bits known to be zero
1581         // can't contribute to the population.
1582         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1583         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1584         Known.Zero.setBitsFrom(LowBits);
1585         // TODO: we could bound KnownOne using the lower bound on the number
1586         // of bits which might be set provided by popcnt KnownOne2.
1587         break;
1588       }
1589       case Intrinsic::fshr:
1590       case Intrinsic::fshl: {
1591         const APInt *SA;
1592         if (!match(I->getOperand(2), m_APInt(SA)))
1593           break;
1594 
1595         // Normalize to funnel shift left.
1596         uint64_t ShiftAmt = SA->urem(BitWidth);
1597         if (II->getIntrinsicID() == Intrinsic::fshr)
1598           ShiftAmt = BitWidth - ShiftAmt;
1599 
1600         KnownBits Known3(BitWidth);
1601         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1602         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1603 
1604         Known.Zero =
1605             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1606         Known.One =
1607             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1608         break;
1609       }
1610       case Intrinsic::uadd_sat:
1611       case Intrinsic::usub_sat: {
1612         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1613         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1614         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1615 
1616         // Add: Leading ones of either operand are preserved.
1617         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1618         // as leading zeros in the result.
1619         unsigned LeadingKnown;
1620         if (IsAdd)
1621           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1622                                   Known2.countMinLeadingOnes());
1623         else
1624           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1625                                   Known2.countMinLeadingOnes());
1626 
1627         Known = KnownBits::computeForAddSub(
1628             IsAdd, /* NSW */ false, Known, Known2);
1629 
1630         // We select between the operation result and all-ones/zero
1631         // respectively, so we can preserve known ones/zeros.
1632         if (IsAdd) {
1633           Known.One.setHighBits(LeadingKnown);
1634           Known.Zero.clearAllBits();
1635         } else {
1636           Known.Zero.setHighBits(LeadingKnown);
1637           Known.One.clearAllBits();
1638         }
1639         break;
1640       }
1641       case Intrinsic::umin:
1642         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1643         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1644         Known = KnownBits::umin(Known, Known2);
1645         break;
1646       case Intrinsic::umax:
1647         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1648         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1649         Known = KnownBits::umax(Known, Known2);
1650         break;
1651       case Intrinsic::smin:
1652         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1653         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1654         Known = KnownBits::smin(Known, Known2);
1655         break;
1656       case Intrinsic::smax:
1657         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1658         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1659         Known = KnownBits::smax(Known, Known2);
1660         break;
1661       case Intrinsic::x86_sse42_crc32_64_64:
1662         Known.Zero.setBitsFrom(32);
1663         break;
1664       }
1665     }
1666     break;
1667   case Instruction::ShuffleVector: {
1668     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1669     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1670     if (!Shuf) {
1671       Known.resetAll();
1672       return;
1673     }
1674     // For undef elements, we don't know anything about the common state of
1675     // the shuffle result.
1676     APInt DemandedLHS, DemandedRHS;
1677     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1678       Known.resetAll();
1679       return;
1680     }
1681     Known.One.setAllBits();
1682     Known.Zero.setAllBits();
1683     if (!!DemandedLHS) {
1684       const Value *LHS = Shuf->getOperand(0);
1685       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1686       // If we don't know any bits, early out.
1687       if (Known.isUnknown())
1688         break;
1689     }
1690     if (!!DemandedRHS) {
1691       const Value *RHS = Shuf->getOperand(1);
1692       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1693       Known = KnownBits::commonBits(Known, Known2);
1694     }
1695     break;
1696   }
1697   case Instruction::InsertElement: {
1698     const Value *Vec = I->getOperand(0);
1699     const Value *Elt = I->getOperand(1);
1700     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1701     // Early out if the index is non-constant or out-of-range.
1702     unsigned NumElts = DemandedElts.getBitWidth();
1703     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1704       Known.resetAll();
1705       return;
1706     }
1707     Known.One.setAllBits();
1708     Known.Zero.setAllBits();
1709     unsigned EltIdx = CIdx->getZExtValue();
1710     // Do we demand the inserted element?
1711     if (DemandedElts[EltIdx]) {
1712       computeKnownBits(Elt, Known, Depth + 1, Q);
1713       // If we don't know any bits, early out.
1714       if (Known.isUnknown())
1715         break;
1716     }
1717     // We don't need the base vector element that has been inserted.
1718     APInt DemandedVecElts = DemandedElts;
1719     DemandedVecElts.clearBit(EltIdx);
1720     if (!!DemandedVecElts) {
1721       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1722       Known = KnownBits::commonBits(Known, Known2);
1723     }
1724     break;
1725   }
1726   case Instruction::ExtractElement: {
1727     // Look through extract element. If the index is non-constant or
1728     // out-of-range demand all elements, otherwise just the extracted element.
1729     const Value *Vec = I->getOperand(0);
1730     const Value *Idx = I->getOperand(1);
1731     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1732     if (isa<ScalableVectorType>(Vec->getType())) {
1733       // FIXME: there's probably *something* we can do with scalable vectors
1734       Known.resetAll();
1735       break;
1736     }
1737     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1738     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1739     if (CIdx && CIdx->getValue().ult(NumElts))
1740       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1741     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1742     break;
1743   }
1744   case Instruction::ExtractValue:
1745     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1746       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1747       if (EVI->getNumIndices() != 1) break;
1748       if (EVI->getIndices()[0] == 0) {
1749         switch (II->getIntrinsicID()) {
1750         default: break;
1751         case Intrinsic::uadd_with_overflow:
1752         case Intrinsic::sadd_with_overflow:
1753           computeKnownBitsAddSub(true, II->getArgOperand(0),
1754                                  II->getArgOperand(1), false, DemandedElts,
1755                                  Known, Known2, Depth, Q);
1756           break;
1757         case Intrinsic::usub_with_overflow:
1758         case Intrinsic::ssub_with_overflow:
1759           computeKnownBitsAddSub(false, II->getArgOperand(0),
1760                                  II->getArgOperand(1), false, DemandedElts,
1761                                  Known, Known2, Depth, Q);
1762           break;
1763         case Intrinsic::umul_with_overflow:
1764         case Intrinsic::smul_with_overflow:
1765           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1766                               DemandedElts, Known, Known2, Depth, Q);
1767           break;
1768         }
1769       }
1770     }
1771     break;
1772   case Instruction::Freeze:
1773     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1774                                   Depth + 1))
1775       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1776     break;
1777   }
1778 }
1779 
1780 /// Determine which bits of V are known to be either zero or one and return
1781 /// them.
1782 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1783                            unsigned Depth, const Query &Q) {
1784   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1785   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1786   return Known;
1787 }
1788 
1789 /// Determine which bits of V are known to be either zero or one and return
1790 /// them.
1791 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1792   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1793   computeKnownBits(V, Known, Depth, Q);
1794   return Known;
1795 }
1796 
1797 /// Determine which bits of V are known to be either zero or one and return
1798 /// them in the Known bit set.
1799 ///
1800 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1801 /// we cannot optimize based on the assumption that it is zero without changing
1802 /// it to be an explicit zero.  If we don't change it to zero, other code could
1803 /// optimized based on the contradictory assumption that it is non-zero.
1804 /// Because instcombine aggressively folds operations with undef args anyway,
1805 /// this won't lose us code quality.
1806 ///
1807 /// This function is defined on values with integer type, values with pointer
1808 /// type, and vectors of integers.  In the case
1809 /// where V is a vector, known zero, and known one values are the
1810 /// same width as the vector element, and the bit is set only if it is true
1811 /// for all of the demanded elements in the vector specified by DemandedElts.
1812 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1813                       KnownBits &Known, unsigned Depth, const Query &Q) {
1814   if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1815     // No demanded elts or V is a scalable vector, better to assume we don't
1816     // know anything.
1817     Known.resetAll();
1818     return;
1819   }
1820 
1821   assert(V && "No Value?");
1822   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1823 
1824 #ifndef NDEBUG
1825   Type *Ty = V->getType();
1826   unsigned BitWidth = Known.getBitWidth();
1827 
1828   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1829          "Not integer or pointer type!");
1830 
1831   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1832     assert(
1833         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1834         "DemandedElt width should equal the fixed vector number of elements");
1835   } else {
1836     assert(DemandedElts == APInt(1, 1) &&
1837            "DemandedElt width should be 1 for scalars");
1838   }
1839 
1840   Type *ScalarTy = Ty->getScalarType();
1841   if (ScalarTy->isPointerTy()) {
1842     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1843            "V and Known should have same BitWidth");
1844   } else {
1845     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1846            "V and Known should have same BitWidth");
1847   }
1848 #endif
1849 
1850   const APInt *C;
1851   if (match(V, m_APInt(C))) {
1852     // We know all of the bits for a scalar constant or a splat vector constant!
1853     Known = KnownBits::makeConstant(*C);
1854     return;
1855   }
1856   // Null and aggregate-zero are all-zeros.
1857   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1858     Known.setAllZero();
1859     return;
1860   }
1861   // Handle a constant vector by taking the intersection of the known bits of
1862   // each element.
1863   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1864     // We know that CDV must be a vector of integers. Take the intersection of
1865     // each element.
1866     Known.Zero.setAllBits(); Known.One.setAllBits();
1867     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1868       if (!DemandedElts[i])
1869         continue;
1870       APInt Elt = CDV->getElementAsAPInt(i);
1871       Known.Zero &= ~Elt;
1872       Known.One &= Elt;
1873     }
1874     return;
1875   }
1876 
1877   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1878     // We know that CV must be a vector of integers. Take the intersection of
1879     // each element.
1880     Known.Zero.setAllBits(); Known.One.setAllBits();
1881     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1882       if (!DemandedElts[i])
1883         continue;
1884       Constant *Element = CV->getAggregateElement(i);
1885       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1886       if (!ElementCI) {
1887         Known.resetAll();
1888         return;
1889       }
1890       const APInt &Elt = ElementCI->getValue();
1891       Known.Zero &= ~Elt;
1892       Known.One &= Elt;
1893     }
1894     return;
1895   }
1896 
1897   // Start out not knowing anything.
1898   Known.resetAll();
1899 
1900   // We can't imply anything about undefs.
1901   if (isa<UndefValue>(V))
1902     return;
1903 
1904   // There's no point in looking through other users of ConstantData for
1905   // assumptions.  Confirm that we've handled them all.
1906   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1907 
1908   // All recursive calls that increase depth must come after this.
1909   if (Depth == MaxAnalysisRecursionDepth)
1910     return;
1911 
1912   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1913   // the bits of its aliasee.
1914   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1915     if (!GA->isInterposable())
1916       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1917     return;
1918   }
1919 
1920   if (const Operator *I = dyn_cast<Operator>(V))
1921     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1922 
1923   // Aligned pointers have trailing zeros - refine Known.Zero set
1924   if (isa<PointerType>(V->getType())) {
1925     Align Alignment = V->getPointerAlignment(Q.DL);
1926     Known.Zero.setLowBits(Log2(Alignment));
1927   }
1928 
1929   // computeKnownBitsFromAssume strictly refines Known.
1930   // Therefore, we run them after computeKnownBitsFromOperator.
1931 
1932   // Check whether a nearby assume intrinsic can determine some known bits.
1933   computeKnownBitsFromAssume(V, Known, Depth, Q);
1934 
1935   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1936 }
1937 
1938 /// Return true if the given value is known to have exactly one
1939 /// bit set when defined. For vectors return true if every element is known to
1940 /// be a power of two when defined. Supports values with integer or pointer
1941 /// types and vectors of integers.
1942 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1943                             const Query &Q) {
1944   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1945 
1946   // Attempt to match against constants.
1947   if (OrZero && match(V, m_Power2OrZero()))
1948       return true;
1949   if (match(V, m_Power2()))
1950       return true;
1951 
1952   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1953   // it is shifted off the end then the result is undefined.
1954   if (match(V, m_Shl(m_One(), m_Value())))
1955     return true;
1956 
1957   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1958   // the bottom.  If it is shifted off the bottom then the result is undefined.
1959   if (match(V, m_LShr(m_SignMask(), m_Value())))
1960     return true;
1961 
1962   // The remaining tests are all recursive, so bail out if we hit the limit.
1963   if (Depth++ == MaxAnalysisRecursionDepth)
1964     return false;
1965 
1966   Value *X = nullptr, *Y = nullptr;
1967   // A shift left or a logical shift right of a power of two is a power of two
1968   // or zero.
1969   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1970                  match(V, m_LShr(m_Value(X), m_Value()))))
1971     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1972 
1973   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1974     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1975 
1976   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1977     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1978            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1979 
1980   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1981     // A power of two and'd with anything is a power of two or zero.
1982     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1983         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1984       return true;
1985     // X & (-X) is always a power of two or zero.
1986     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1987       return true;
1988     return false;
1989   }
1990 
1991   // Adding a power-of-two or zero to the same power-of-two or zero yields
1992   // either the original power-of-two, a larger power-of-two or zero.
1993   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1994     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1995     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1996         Q.IIQ.hasNoSignedWrap(VOBO)) {
1997       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1998           match(X, m_And(m_Value(), m_Specific(Y))))
1999         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2000           return true;
2001       if (match(Y, m_And(m_Specific(X), m_Value())) ||
2002           match(Y, m_And(m_Value(), m_Specific(X))))
2003         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2004           return true;
2005 
2006       unsigned BitWidth = V->getType()->getScalarSizeInBits();
2007       KnownBits LHSBits(BitWidth);
2008       computeKnownBits(X, LHSBits, Depth, Q);
2009 
2010       KnownBits RHSBits(BitWidth);
2011       computeKnownBits(Y, RHSBits, Depth, Q);
2012       // If i8 V is a power of two or zero:
2013       //  ZeroBits: 1 1 1 0 1 1 1 1
2014       // ~ZeroBits: 0 0 0 1 0 0 0 0
2015       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2016         // If OrZero isn't set, we cannot give back a zero result.
2017         // Make sure either the LHS or RHS has a bit set.
2018         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2019           return true;
2020     }
2021   }
2022 
2023   // An exact divide or right shift can only shift off zero bits, so the result
2024   // is a power of two only if the first operand is a power of two and not
2025   // copying a sign bit (sdiv int_min, 2).
2026   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2027       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2028     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2029                                   Depth, Q);
2030   }
2031 
2032   return false;
2033 }
2034 
2035 /// Test whether a GEP's result is known to be non-null.
2036 ///
2037 /// Uses properties inherent in a GEP to try to determine whether it is known
2038 /// to be non-null.
2039 ///
2040 /// Currently this routine does not support vector GEPs.
2041 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2042                               const Query &Q) {
2043   const Function *F = nullptr;
2044   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2045     F = I->getFunction();
2046 
2047   if (!GEP->isInBounds() ||
2048       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2049     return false;
2050 
2051   // FIXME: Support vector-GEPs.
2052   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2053 
2054   // If the base pointer is non-null, we cannot walk to a null address with an
2055   // inbounds GEP in address space zero.
2056   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2057     return true;
2058 
2059   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2060   // If so, then the GEP cannot produce a null pointer, as doing so would
2061   // inherently violate the inbounds contract within address space zero.
2062   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2063        GTI != GTE; ++GTI) {
2064     // Struct types are easy -- they must always be indexed by a constant.
2065     if (StructType *STy = GTI.getStructTypeOrNull()) {
2066       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2067       unsigned ElementIdx = OpC->getZExtValue();
2068       const StructLayout *SL = Q.DL.getStructLayout(STy);
2069       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2070       if (ElementOffset > 0)
2071         return true;
2072       continue;
2073     }
2074 
2075     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2076     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2077       continue;
2078 
2079     // Fast path the constant operand case both for efficiency and so we don't
2080     // increment Depth when just zipping down an all-constant GEP.
2081     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2082       if (!OpC->isZero())
2083         return true;
2084       continue;
2085     }
2086 
2087     // We post-increment Depth here because while isKnownNonZero increments it
2088     // as well, when we pop back up that increment won't persist. We don't want
2089     // to recurse 10k times just because we have 10k GEP operands. We don't
2090     // bail completely out because we want to handle constant GEPs regardless
2091     // of depth.
2092     if (Depth++ >= MaxAnalysisRecursionDepth)
2093       continue;
2094 
2095     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2096       return true;
2097   }
2098 
2099   return false;
2100 }
2101 
2102 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2103                                                   const Instruction *CtxI,
2104                                                   const DominatorTree *DT) {
2105   if (isa<Constant>(V))
2106     return false;
2107 
2108   if (!CtxI || !DT)
2109     return false;
2110 
2111   unsigned NumUsesExplored = 0;
2112   for (auto *U : V->users()) {
2113     // Avoid massive lists
2114     if (NumUsesExplored >= DomConditionsMaxUses)
2115       break;
2116     NumUsesExplored++;
2117 
2118     // If the value is used as an argument to a call or invoke, then argument
2119     // attributes may provide an answer about null-ness.
2120     if (const auto *CB = dyn_cast<CallBase>(U))
2121       if (auto *CalledFunc = CB->getCalledFunction())
2122         for (const Argument &Arg : CalledFunc->args())
2123           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2124               Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2125               DT->dominates(CB, CtxI))
2126             return true;
2127 
2128     // If the value is used as a load/store, then the pointer must be non null.
2129     if (V == getLoadStorePointerOperand(U)) {
2130       const Instruction *I = cast<Instruction>(U);
2131       if (!NullPointerIsDefined(I->getFunction(),
2132                                 V->getType()->getPointerAddressSpace()) &&
2133           DT->dominates(I, CtxI))
2134         return true;
2135     }
2136 
2137     // Consider only compare instructions uniquely controlling a branch
2138     Value *RHS;
2139     CmpInst::Predicate Pred;
2140     if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2141       continue;
2142 
2143     bool NonNullIfTrue;
2144     if (cmpExcludesZero(Pred, RHS))
2145       NonNullIfTrue = true;
2146     else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2147       NonNullIfTrue = false;
2148     else
2149       continue;
2150 
2151     SmallVector<const User *, 4> WorkList;
2152     SmallPtrSet<const User *, 4> Visited;
2153     for (auto *CmpU : U->users()) {
2154       assert(WorkList.empty() && "Should be!");
2155       if (Visited.insert(CmpU).second)
2156         WorkList.push_back(CmpU);
2157 
2158       while (!WorkList.empty()) {
2159         auto *Curr = WorkList.pop_back_val();
2160 
2161         // If a user is an AND, add all its users to the work list. We only
2162         // propagate "pred != null" condition through AND because it is only
2163         // correct to assume that all conditions of AND are met in true branch.
2164         // TODO: Support similar logic of OR and EQ predicate?
2165         if (NonNullIfTrue)
2166           if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2167             for (auto *CurrU : Curr->users())
2168               if (Visited.insert(CurrU).second)
2169                 WorkList.push_back(CurrU);
2170             continue;
2171           }
2172 
2173         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2174           assert(BI->isConditional() && "uses a comparison!");
2175 
2176           BasicBlock *NonNullSuccessor =
2177               BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2178           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2179           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2180             return true;
2181         } else if (NonNullIfTrue && isGuard(Curr) &&
2182                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2183           return true;
2184         }
2185       }
2186     }
2187   }
2188 
2189   return false;
2190 }
2191 
2192 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2193 /// ensure that the value it's attached to is never Value?  'RangeType' is
2194 /// is the type of the value described by the range.
2195 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2196   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2197   assert(NumRanges >= 1);
2198   for (unsigned i = 0; i < NumRanges; ++i) {
2199     ConstantInt *Lower =
2200         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2201     ConstantInt *Upper =
2202         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2203     ConstantRange Range(Lower->getValue(), Upper->getValue());
2204     if (Range.contains(Value))
2205       return false;
2206   }
2207   return true;
2208 }
2209 
2210 /// Return true if the given value is known to be non-zero when defined. For
2211 /// vectors, return true if every demanded element is known to be non-zero when
2212 /// defined. For pointers, if the context instruction and dominator tree are
2213 /// specified, perform context-sensitive analysis and return true if the
2214 /// pointer couldn't possibly be null at the specified instruction.
2215 /// Supports values with integer or pointer type and vectors of integers.
2216 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2217                     const Query &Q) {
2218   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2219   // vector
2220   if (isa<ScalableVectorType>(V->getType()))
2221     return false;
2222 
2223   if (auto *C = dyn_cast<Constant>(V)) {
2224     if (C->isNullValue())
2225       return false;
2226     if (isa<ConstantInt>(C))
2227       // Must be non-zero due to null test above.
2228       return true;
2229 
2230     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2231       // See the comment for IntToPtr/PtrToInt instructions below.
2232       if (CE->getOpcode() == Instruction::IntToPtr ||
2233           CE->getOpcode() == Instruction::PtrToInt)
2234         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2235                 .getFixedSize() <=
2236             Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2237           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2238     }
2239 
2240     // For constant vectors, check that all elements are undefined or known
2241     // non-zero to determine that the whole vector is known non-zero.
2242     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2243       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2244         if (!DemandedElts[i])
2245           continue;
2246         Constant *Elt = C->getAggregateElement(i);
2247         if (!Elt || Elt->isNullValue())
2248           return false;
2249         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2250           return false;
2251       }
2252       return true;
2253     }
2254 
2255     // A global variable in address space 0 is non null unless extern weak
2256     // or an absolute symbol reference. Other address spaces may have null as a
2257     // valid address for a global, so we can't assume anything.
2258     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2259       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2260           GV->getType()->getAddressSpace() == 0)
2261         return true;
2262     } else
2263       return false;
2264   }
2265 
2266   if (auto *I = dyn_cast<Instruction>(V)) {
2267     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2268       // If the possible ranges don't contain zero, then the value is
2269       // definitely non-zero.
2270       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2271         const APInt ZeroValue(Ty->getBitWidth(), 0);
2272         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2273           return true;
2274       }
2275     }
2276   }
2277 
2278   if (isKnownNonZeroFromAssume(V, Q))
2279     return true;
2280 
2281   // Some of the tests below are recursive, so bail out if we hit the limit.
2282   if (Depth++ >= MaxAnalysisRecursionDepth)
2283     return false;
2284 
2285   // Check for pointer simplifications.
2286 
2287   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2288     // Alloca never returns null, malloc might.
2289     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2290       return true;
2291 
2292     // A byval, inalloca may not be null in a non-default addres space. A
2293     // nonnull argument is assumed never 0.
2294     if (const Argument *A = dyn_cast<Argument>(V)) {
2295       if (((A->hasPassPointeeByValueCopyAttr() &&
2296             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2297            A->hasNonNullAttr()))
2298         return true;
2299     }
2300 
2301     // A Load tagged with nonnull metadata is never null.
2302     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2303       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2304         return true;
2305 
2306     if (const auto *Call = dyn_cast<CallBase>(V)) {
2307       if (Call->isReturnNonNull())
2308         return true;
2309       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2310         return isKnownNonZero(RP, Depth, Q);
2311     }
2312   }
2313 
2314   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2315     return true;
2316 
2317   // Check for recursive pointer simplifications.
2318   if (V->getType()->isPointerTy()) {
2319     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2320     // do not alter the value, or at least not the nullness property of the
2321     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2322     //
2323     // Note that we have to take special care to avoid looking through
2324     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2325     // as casts that can alter the value, e.g., AddrSpaceCasts.
2326     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2327       return isGEPKnownNonNull(GEP, Depth, Q);
2328 
2329     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2330       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2331 
2332     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2333       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2334           Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2335         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2336   }
2337 
2338   // Similar to int2ptr above, we can look through ptr2int here if the cast
2339   // is a no-op or an extend and not a truncate.
2340   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2341     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2342         Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2343       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2344 
2345   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2346 
2347   // X | Y != 0 if X != 0 or Y != 0.
2348   Value *X = nullptr, *Y = nullptr;
2349   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2350     return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2351            isKnownNonZero(Y, DemandedElts, Depth, Q);
2352 
2353   // ext X != 0 if X != 0.
2354   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2355     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2356 
2357   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2358   // if the lowest bit is shifted off the end.
2359   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2360     // shl nuw can't remove any non-zero bits.
2361     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2362     if (Q.IIQ.hasNoUnsignedWrap(BO))
2363       return isKnownNonZero(X, Depth, Q);
2364 
2365     KnownBits Known(BitWidth);
2366     computeKnownBits(X, DemandedElts, Known, Depth, Q);
2367     if (Known.One[0])
2368       return true;
2369   }
2370   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2371   // defined if the sign bit is shifted off the end.
2372   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2373     // shr exact can only shift out zero bits.
2374     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2375     if (BO->isExact())
2376       return isKnownNonZero(X, Depth, Q);
2377 
2378     KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2379     if (Known.isNegative())
2380       return true;
2381 
2382     // If the shifter operand is a constant, and all of the bits shifted
2383     // out are known to be zero, and X is known non-zero then at least one
2384     // non-zero bit must remain.
2385     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2386       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2387       // Is there a known one in the portion not shifted out?
2388       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2389         return true;
2390       // Are all the bits to be shifted out known zero?
2391       if (Known.countMinTrailingZeros() >= ShiftVal)
2392         return isKnownNonZero(X, DemandedElts, Depth, Q);
2393     }
2394   }
2395   // div exact can only produce a zero if the dividend is zero.
2396   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2397     return isKnownNonZero(X, DemandedElts, Depth, Q);
2398   }
2399   // X + Y.
2400   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2401     KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2402     KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2403 
2404     // If X and Y are both non-negative (as signed values) then their sum is not
2405     // zero unless both X and Y are zero.
2406     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2407       if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2408           isKnownNonZero(Y, DemandedElts, Depth, Q))
2409         return true;
2410 
2411     // If X and Y are both negative (as signed values) then their sum is not
2412     // zero unless both X and Y equal INT_MIN.
2413     if (XKnown.isNegative() && YKnown.isNegative()) {
2414       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2415       // The sign bit of X is set.  If some other bit is set then X is not equal
2416       // to INT_MIN.
2417       if (XKnown.One.intersects(Mask))
2418         return true;
2419       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2420       // to INT_MIN.
2421       if (YKnown.One.intersects(Mask))
2422         return true;
2423     }
2424 
2425     // The sum of a non-negative number and a power of two is not zero.
2426     if (XKnown.isNonNegative() &&
2427         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2428       return true;
2429     if (YKnown.isNonNegative() &&
2430         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2431       return true;
2432   }
2433   // X * Y.
2434   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2435     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2436     // If X and Y are non-zero then so is X * Y as long as the multiplication
2437     // does not overflow.
2438     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2439         isKnownNonZero(X, DemandedElts, Depth, Q) &&
2440         isKnownNonZero(Y, DemandedElts, Depth, Q))
2441       return true;
2442   }
2443   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2444   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2445     if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2446         isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2447       return true;
2448   }
2449   // PHI
2450   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2451     // Try and detect a recurrence that monotonically increases from a
2452     // starting value, as these are common as induction variables.
2453     if (PN->getNumIncomingValues() == 2) {
2454       Value *Start = PN->getIncomingValue(0);
2455       Value *Induction = PN->getIncomingValue(1);
2456       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2457         std::swap(Start, Induction);
2458       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2459         if (!C->isZero() && !C->isNegative()) {
2460           ConstantInt *X;
2461           if (Q.IIQ.UseInstrInfo &&
2462               (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2463                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2464               !X->isNegative())
2465             return true;
2466         }
2467       }
2468     }
2469     // Check if all incoming values are non-zero using recursion.
2470     Query RecQ = Q;
2471     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2472     return llvm::all_of(PN->operands(), [&](const Use &U) {
2473       if (U.get() == PN)
2474         return true;
2475       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2476       return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2477     });
2478   }
2479   // ExtractElement
2480   else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2481     const Value *Vec = EEI->getVectorOperand();
2482     const Value *Idx = EEI->getIndexOperand();
2483     auto *CIdx = dyn_cast<ConstantInt>(Idx);
2484     if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2485       unsigned NumElts = VecTy->getNumElements();
2486       APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2487       if (CIdx && CIdx->getValue().ult(NumElts))
2488         DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2489       return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2490     }
2491   }
2492   // Freeze
2493   else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2494     auto *Op = FI->getOperand(0);
2495     if (isKnownNonZero(Op, Depth, Q) &&
2496         isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2497       return true;
2498   }
2499 
2500   KnownBits Known(BitWidth);
2501   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2502   return Known.One != 0;
2503 }
2504 
2505 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2506   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2507   // vector
2508   if (isa<ScalableVectorType>(V->getType()))
2509     return false;
2510 
2511   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2512   APInt DemandedElts =
2513       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2514   return isKnownNonZero(V, DemandedElts, Depth, Q);
2515 }
2516 
2517 /// Return true if V2 == V1 + X, where X is known non-zero.
2518 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2519                            const Query &Q) {
2520   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2521   if (!BO || BO->getOpcode() != Instruction::Add)
2522     return false;
2523   Value *Op = nullptr;
2524   if (V2 == BO->getOperand(0))
2525     Op = BO->getOperand(1);
2526   else if (V2 == BO->getOperand(1))
2527     Op = BO->getOperand(0);
2528   else
2529     return false;
2530   return isKnownNonZero(Op, Depth + 1, Q);
2531 }
2532 
2533 
2534 /// Return true if it is known that V1 != V2.
2535 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2536                             const Query &Q) {
2537   if (V1 == V2)
2538     return false;
2539   if (V1->getType() != V2->getType())
2540     // We can't look through casts yet.
2541     return false;
2542 
2543   if (Depth >= MaxAnalysisRecursionDepth)
2544     return false;
2545 
2546   // See if we can recurse through (exactly one of) our operands.  This
2547   // requires our operation be 1-to-1 and map every input value to exactly
2548   // one output value.  Such an operation is invertible.
2549   auto *O1 = dyn_cast<Operator>(V1);
2550   auto *O2 = dyn_cast<Operator>(V2);
2551   if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2552     switch (O1->getOpcode()) {
2553     default: break;
2554     case Instruction::Add:
2555     case Instruction::Sub:
2556       // Assume operand order has been canonicalized
2557       if (O1->getOperand(0) == O2->getOperand(0))
2558         return isKnownNonEqual(O1->getOperand(1), O2->getOperand(1),
2559                                Depth + 1, Q);
2560       if (O1->getOperand(1) == O2->getOperand(1))
2561         return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2562                                Depth + 1, Q);
2563       break;
2564     case Instruction::Mul: {
2565       // invertible if A * B == (A * B) mod 2^N where A, and B are integers
2566       // and N is the bitwdith.  The nsw case is non-obvious, but proven by
2567       // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2568       auto *OBO1 = cast<OverflowingBinaryOperator>(O1);
2569       auto *OBO2 = cast<OverflowingBinaryOperator>(O2);
2570       if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2571           (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2572         break;
2573 
2574       // Assume operand order has been canonicalized
2575       if (O1->getOperand(1) == O2->getOperand(1) &&
2576           isa<ConstantInt>(O1->getOperand(1)) &&
2577           !cast<ConstantInt>(O1->getOperand(1))->isZero())
2578         return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2579                                Depth + 1, Q);
2580       break;
2581     }
2582     case Instruction::SExt:
2583     case Instruction::ZExt:
2584       if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType())
2585         return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2586                                Depth + 1, Q);
2587       break;
2588     };
2589   }
2590 
2591   if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2592     return true;
2593 
2594   if (V1->getType()->isIntOrIntVectorTy()) {
2595     // Are any known bits in V1 contradictory to known bits in V2? If V1
2596     // has a known zero where V2 has a known one, they must not be equal.
2597     KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2598     KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2599 
2600     if (Known1.Zero.intersects(Known2.One) ||
2601         Known2.Zero.intersects(Known1.One))
2602       return true;
2603   }
2604   return false;
2605 }
2606 
2607 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2608 /// simplify operations downstream. Mask is known to be zero for bits that V
2609 /// cannot have.
2610 ///
2611 /// This function is defined on values with integer type, values with pointer
2612 /// type, and vectors of integers.  In the case
2613 /// where V is a vector, the mask, known zero, and known one values are the
2614 /// same width as the vector element, and the bit is set only if it is true
2615 /// for all of the elements in the vector.
2616 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2617                        const Query &Q) {
2618   KnownBits Known(Mask.getBitWidth());
2619   computeKnownBits(V, Known, Depth, Q);
2620   return Mask.isSubsetOf(Known.Zero);
2621 }
2622 
2623 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2624 // Returns the input and lower/upper bounds.
2625 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2626                                 const APInt *&CLow, const APInt *&CHigh) {
2627   assert(isa<Operator>(Select) &&
2628          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2629          "Input should be a Select!");
2630 
2631   const Value *LHS = nullptr, *RHS = nullptr;
2632   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2633   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2634     return false;
2635 
2636   if (!match(RHS, m_APInt(CLow)))
2637     return false;
2638 
2639   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2640   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2641   if (getInverseMinMaxFlavor(SPF) != SPF2)
2642     return false;
2643 
2644   if (!match(RHS2, m_APInt(CHigh)))
2645     return false;
2646 
2647   if (SPF == SPF_SMIN)
2648     std::swap(CLow, CHigh);
2649 
2650   In = LHS2;
2651   return CLow->sle(*CHigh);
2652 }
2653 
2654 /// For vector constants, loop over the elements and find the constant with the
2655 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2656 /// or if any element was not analyzed; otherwise, return the count for the
2657 /// element with the minimum number of sign bits.
2658 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2659                                                  const APInt &DemandedElts,
2660                                                  unsigned TyBits) {
2661   const auto *CV = dyn_cast<Constant>(V);
2662   if (!CV || !isa<FixedVectorType>(CV->getType()))
2663     return 0;
2664 
2665   unsigned MinSignBits = TyBits;
2666   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2667   for (unsigned i = 0; i != NumElts; ++i) {
2668     if (!DemandedElts[i])
2669       continue;
2670     // If we find a non-ConstantInt, bail out.
2671     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2672     if (!Elt)
2673       return 0;
2674 
2675     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2676   }
2677 
2678   return MinSignBits;
2679 }
2680 
2681 static unsigned ComputeNumSignBitsImpl(const Value *V,
2682                                        const APInt &DemandedElts,
2683                                        unsigned Depth, const Query &Q);
2684 
2685 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2686                                    unsigned Depth, const Query &Q) {
2687   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2688   assert(Result > 0 && "At least one sign bit needs to be present!");
2689   return Result;
2690 }
2691 
2692 /// Return the number of times the sign bit of the register is replicated into
2693 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2694 /// (itself), but other cases can give us information. For example, immediately
2695 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2696 /// other, so we return 3. For vectors, return the number of sign bits for the
2697 /// vector element with the minimum number of known sign bits of the demanded
2698 /// elements in the vector specified by DemandedElts.
2699 static unsigned ComputeNumSignBitsImpl(const Value *V,
2700                                        const APInt &DemandedElts,
2701                                        unsigned Depth, const Query &Q) {
2702   Type *Ty = V->getType();
2703 
2704   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2705   // vector
2706   if (isa<ScalableVectorType>(Ty))
2707     return 1;
2708 
2709 #ifndef NDEBUG
2710   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2711 
2712   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2713     assert(
2714         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2715         "DemandedElt width should equal the fixed vector number of elements");
2716   } else {
2717     assert(DemandedElts == APInt(1, 1) &&
2718            "DemandedElt width should be 1 for scalars");
2719   }
2720 #endif
2721 
2722   // We return the minimum number of sign bits that are guaranteed to be present
2723   // in V, so for undef we have to conservatively return 1.  We don't have the
2724   // same behavior for poison though -- that's a FIXME today.
2725 
2726   Type *ScalarTy = Ty->getScalarType();
2727   unsigned TyBits = ScalarTy->isPointerTy() ?
2728     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2729     Q.DL.getTypeSizeInBits(ScalarTy);
2730 
2731   unsigned Tmp, Tmp2;
2732   unsigned FirstAnswer = 1;
2733 
2734   // Note that ConstantInt is handled by the general computeKnownBits case
2735   // below.
2736 
2737   if (Depth == MaxAnalysisRecursionDepth)
2738     return 1;
2739 
2740   if (auto *U = dyn_cast<Operator>(V)) {
2741     switch (Operator::getOpcode(V)) {
2742     default: break;
2743     case Instruction::SExt:
2744       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2745       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2746 
2747     case Instruction::SDiv: {
2748       const APInt *Denominator;
2749       // sdiv X, C -> adds log(C) sign bits.
2750       if (match(U->getOperand(1), m_APInt(Denominator))) {
2751 
2752         // Ignore non-positive denominator.
2753         if (!Denominator->isStrictlyPositive())
2754           break;
2755 
2756         // Calculate the incoming numerator bits.
2757         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2758 
2759         // Add floor(log(C)) bits to the numerator bits.
2760         return std::min(TyBits, NumBits + Denominator->logBase2());
2761       }
2762       break;
2763     }
2764 
2765     case Instruction::SRem: {
2766       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2767 
2768       const APInt *Denominator;
2769       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2770       // positive constant.  This let us put a lower bound on the number of sign
2771       // bits.
2772       if (match(U->getOperand(1), m_APInt(Denominator))) {
2773 
2774         // Ignore non-positive denominator.
2775         if (Denominator->isStrictlyPositive()) {
2776           // Calculate the leading sign bit constraints by examining the
2777           // denominator.  Given that the denominator is positive, there are two
2778           // cases:
2779           //
2780           //  1. The numerator is positive. The result range is [0,C) and
2781           //     [0,C) u< (1 << ceilLogBase2(C)).
2782           //
2783           //  2. The numerator is negative. Then the result range is (-C,0] and
2784           //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2785           //
2786           // Thus a lower bound on the number of sign bits is `TyBits -
2787           // ceilLogBase2(C)`.
2788 
2789           unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2790           Tmp = std::max(Tmp, ResBits);
2791         }
2792       }
2793       return Tmp;
2794     }
2795 
2796     case Instruction::AShr: {
2797       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2798       // ashr X, C   -> adds C sign bits.  Vectors too.
2799       const APInt *ShAmt;
2800       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2801         if (ShAmt->uge(TyBits))
2802           break; // Bad shift.
2803         unsigned ShAmtLimited = ShAmt->getZExtValue();
2804         Tmp += ShAmtLimited;
2805         if (Tmp > TyBits) Tmp = TyBits;
2806       }
2807       return Tmp;
2808     }
2809     case Instruction::Shl: {
2810       const APInt *ShAmt;
2811       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2812         // shl destroys sign bits.
2813         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2814         if (ShAmt->uge(TyBits) ||   // Bad shift.
2815             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2816         Tmp2 = ShAmt->getZExtValue();
2817         return Tmp - Tmp2;
2818       }
2819       break;
2820     }
2821     case Instruction::And:
2822     case Instruction::Or:
2823     case Instruction::Xor: // NOT is handled here.
2824       // Logical binary ops preserve the number of sign bits at the worst.
2825       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2826       if (Tmp != 1) {
2827         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2828         FirstAnswer = std::min(Tmp, Tmp2);
2829         // We computed what we know about the sign bits as our first
2830         // answer. Now proceed to the generic code that uses
2831         // computeKnownBits, and pick whichever answer is better.
2832       }
2833       break;
2834 
2835     case Instruction::Select: {
2836       // If we have a clamp pattern, we know that the number of sign bits will
2837       // be the minimum of the clamp min/max range.
2838       const Value *X;
2839       const APInt *CLow, *CHigh;
2840       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2841         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2842 
2843       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2844       if (Tmp == 1) break;
2845       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2846       return std::min(Tmp, Tmp2);
2847     }
2848 
2849     case Instruction::Add:
2850       // Add can have at most one carry bit.  Thus we know that the output
2851       // is, at worst, one more bit than the inputs.
2852       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2853       if (Tmp == 1) break;
2854 
2855       // Special case decrementing a value (ADD X, -1):
2856       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2857         if (CRHS->isAllOnesValue()) {
2858           KnownBits Known(TyBits);
2859           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2860 
2861           // If the input is known to be 0 or 1, the output is 0/-1, which is
2862           // all sign bits set.
2863           if ((Known.Zero | 1).isAllOnesValue())
2864             return TyBits;
2865 
2866           // If we are subtracting one from a positive number, there is no carry
2867           // out of the result.
2868           if (Known.isNonNegative())
2869             return Tmp;
2870         }
2871 
2872       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2873       if (Tmp2 == 1) break;
2874       return std::min(Tmp, Tmp2) - 1;
2875 
2876     case Instruction::Sub:
2877       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2878       if (Tmp2 == 1) break;
2879 
2880       // Handle NEG.
2881       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2882         if (CLHS->isNullValue()) {
2883           KnownBits Known(TyBits);
2884           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2885           // If the input is known to be 0 or 1, the output is 0/-1, which is
2886           // all sign bits set.
2887           if ((Known.Zero | 1).isAllOnesValue())
2888             return TyBits;
2889 
2890           // If the input is known to be positive (the sign bit is known clear),
2891           // the output of the NEG has the same number of sign bits as the
2892           // input.
2893           if (Known.isNonNegative())
2894             return Tmp2;
2895 
2896           // Otherwise, we treat this like a SUB.
2897         }
2898 
2899       // Sub can have at most one carry bit.  Thus we know that the output
2900       // is, at worst, one more bit than the inputs.
2901       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2902       if (Tmp == 1) break;
2903       return std::min(Tmp, Tmp2) - 1;
2904 
2905     case Instruction::Mul: {
2906       // The output of the Mul can be at most twice the valid bits in the
2907       // inputs.
2908       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2909       if (SignBitsOp0 == 1) break;
2910       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2911       if (SignBitsOp1 == 1) break;
2912       unsigned OutValidBits =
2913           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2914       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2915     }
2916 
2917     case Instruction::PHI: {
2918       const PHINode *PN = cast<PHINode>(U);
2919       unsigned NumIncomingValues = PN->getNumIncomingValues();
2920       // Don't analyze large in-degree PHIs.
2921       if (NumIncomingValues > 4) break;
2922       // Unreachable blocks may have zero-operand PHI nodes.
2923       if (NumIncomingValues == 0) break;
2924 
2925       // Take the minimum of all incoming values.  This can't infinitely loop
2926       // because of our depth threshold.
2927       Query RecQ = Q;
2928       Tmp = TyBits;
2929       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
2930         if (Tmp == 1) return Tmp;
2931         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
2932         Tmp = std::min(
2933             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
2934       }
2935       return Tmp;
2936     }
2937 
2938     case Instruction::Trunc:
2939       // FIXME: it's tricky to do anything useful for this, but it is an
2940       // important case for targets like X86.
2941       break;
2942 
2943     case Instruction::ExtractElement:
2944       // Look through extract element. At the moment we keep this simple and
2945       // skip tracking the specific element. But at least we might find
2946       // information valid for all elements of the vector (for example if vector
2947       // is sign extended, shifted, etc).
2948       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2949 
2950     case Instruction::ShuffleVector: {
2951       // Collect the minimum number of sign bits that are shared by every vector
2952       // element referenced by the shuffle.
2953       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
2954       if (!Shuf) {
2955         // FIXME: Add support for shufflevector constant expressions.
2956         return 1;
2957       }
2958       APInt DemandedLHS, DemandedRHS;
2959       // For undef elements, we don't know anything about the common state of
2960       // the shuffle result.
2961       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
2962         return 1;
2963       Tmp = std::numeric_limits<unsigned>::max();
2964       if (!!DemandedLHS) {
2965         const Value *LHS = Shuf->getOperand(0);
2966         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
2967       }
2968       // If we don't know anything, early out and try computeKnownBits
2969       // fall-back.
2970       if (Tmp == 1)
2971         break;
2972       if (!!DemandedRHS) {
2973         const Value *RHS = Shuf->getOperand(1);
2974         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
2975         Tmp = std::min(Tmp, Tmp2);
2976       }
2977       // If we don't know anything, early out and try computeKnownBits
2978       // fall-back.
2979       if (Tmp == 1)
2980         break;
2981       assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
2982       return Tmp;
2983     }
2984     case Instruction::Call: {
2985       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
2986         switch (II->getIntrinsicID()) {
2987         default: break;
2988         case Intrinsic::abs:
2989           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2990           if (Tmp == 1) break;
2991 
2992           // Absolute value reduces number of sign bits by at most 1.
2993           return Tmp - 1;
2994         }
2995       }
2996     }
2997     }
2998   }
2999 
3000   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3001   // use this information.
3002 
3003   // If we can examine all elements of a vector constant successfully, we're
3004   // done (we can't do any better than that). If not, keep trying.
3005   if (unsigned VecSignBits =
3006           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3007     return VecSignBits;
3008 
3009   KnownBits Known(TyBits);
3010   computeKnownBits(V, DemandedElts, Known, Depth, Q);
3011 
3012   // If we know that the sign bit is either zero or one, determine the number of
3013   // identical bits in the top of the input value.
3014   return std::max(FirstAnswer, Known.countMinSignBits());
3015 }
3016 
3017 /// This function computes the integer multiple of Base that equals V.
3018 /// If successful, it returns true and returns the multiple in
3019 /// Multiple. If unsuccessful, it returns false. It looks
3020 /// through SExt instructions only if LookThroughSExt is true.
3021 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3022                            bool LookThroughSExt, unsigned Depth) {
3023   assert(V && "No Value?");
3024   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3025   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
3026 
3027   Type *T = V->getType();
3028 
3029   ConstantInt *CI = dyn_cast<ConstantInt>(V);
3030 
3031   if (Base == 0)
3032     return false;
3033 
3034   if (Base == 1) {
3035     Multiple = V;
3036     return true;
3037   }
3038 
3039   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3040   Constant *BaseVal = ConstantInt::get(T, Base);
3041   if (CO && CO == BaseVal) {
3042     // Multiple is 1.
3043     Multiple = ConstantInt::get(T, 1);
3044     return true;
3045   }
3046 
3047   if (CI && CI->getZExtValue() % Base == 0) {
3048     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3049     return true;
3050   }
3051 
3052   if (Depth == MaxAnalysisRecursionDepth) return false;
3053 
3054   Operator *I = dyn_cast<Operator>(V);
3055   if (!I) return false;
3056 
3057   switch (I->getOpcode()) {
3058   default: break;
3059   case Instruction::SExt:
3060     if (!LookThroughSExt) return false;
3061     // otherwise fall through to ZExt
3062     LLVM_FALLTHROUGH;
3063   case Instruction::ZExt:
3064     return ComputeMultiple(I->getOperand(0), Base, Multiple,
3065                            LookThroughSExt, Depth+1);
3066   case Instruction::Shl:
3067   case Instruction::Mul: {
3068     Value *Op0 = I->getOperand(0);
3069     Value *Op1 = I->getOperand(1);
3070 
3071     if (I->getOpcode() == Instruction::Shl) {
3072       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3073       if (!Op1CI) return false;
3074       // Turn Op0 << Op1 into Op0 * 2^Op1
3075       APInt Op1Int = Op1CI->getValue();
3076       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3077       APInt API(Op1Int.getBitWidth(), 0);
3078       API.setBit(BitToSet);
3079       Op1 = ConstantInt::get(V->getContext(), API);
3080     }
3081 
3082     Value *Mul0 = nullptr;
3083     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3084       if (Constant *Op1C = dyn_cast<Constant>(Op1))
3085         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3086           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3087               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3088             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3089           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3090               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3091             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3092 
3093           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3094           Multiple = ConstantExpr::getMul(MulC, Op1C);
3095           return true;
3096         }
3097 
3098       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3099         if (Mul0CI->getValue() == 1) {
3100           // V == Base * Op1, so return Op1
3101           Multiple = Op1;
3102           return true;
3103         }
3104     }
3105 
3106     Value *Mul1 = nullptr;
3107     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3108       if (Constant *Op0C = dyn_cast<Constant>(Op0))
3109         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3110           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3111               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3112             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3113           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3114               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3115             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3116 
3117           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3118           Multiple = ConstantExpr::getMul(MulC, Op0C);
3119           return true;
3120         }
3121 
3122       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3123         if (Mul1CI->getValue() == 1) {
3124           // V == Base * Op0, so return Op0
3125           Multiple = Op0;
3126           return true;
3127         }
3128     }
3129   }
3130   }
3131 
3132   // We could not determine if V is a multiple of Base.
3133   return false;
3134 }
3135 
3136 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3137                                             const TargetLibraryInfo *TLI) {
3138   const Function *F = CB.getCalledFunction();
3139   if (!F)
3140     return Intrinsic::not_intrinsic;
3141 
3142   if (F->isIntrinsic())
3143     return F->getIntrinsicID();
3144 
3145   // We are going to infer semantics of a library function based on mapping it
3146   // to an LLVM intrinsic. Check that the library function is available from
3147   // this callbase and in this environment.
3148   LibFunc Func;
3149   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3150       !CB.onlyReadsMemory())
3151     return Intrinsic::not_intrinsic;
3152 
3153   switch (Func) {
3154   default:
3155     break;
3156   case LibFunc_sin:
3157   case LibFunc_sinf:
3158   case LibFunc_sinl:
3159     return Intrinsic::sin;
3160   case LibFunc_cos:
3161   case LibFunc_cosf:
3162   case LibFunc_cosl:
3163     return Intrinsic::cos;
3164   case LibFunc_exp:
3165   case LibFunc_expf:
3166   case LibFunc_expl:
3167     return Intrinsic::exp;
3168   case LibFunc_exp2:
3169   case LibFunc_exp2f:
3170   case LibFunc_exp2l:
3171     return Intrinsic::exp2;
3172   case LibFunc_log:
3173   case LibFunc_logf:
3174   case LibFunc_logl:
3175     return Intrinsic::log;
3176   case LibFunc_log10:
3177   case LibFunc_log10f:
3178   case LibFunc_log10l:
3179     return Intrinsic::log10;
3180   case LibFunc_log2:
3181   case LibFunc_log2f:
3182   case LibFunc_log2l:
3183     return Intrinsic::log2;
3184   case LibFunc_fabs:
3185   case LibFunc_fabsf:
3186   case LibFunc_fabsl:
3187     return Intrinsic::fabs;
3188   case LibFunc_fmin:
3189   case LibFunc_fminf:
3190   case LibFunc_fminl:
3191     return Intrinsic::minnum;
3192   case LibFunc_fmax:
3193   case LibFunc_fmaxf:
3194   case LibFunc_fmaxl:
3195     return Intrinsic::maxnum;
3196   case LibFunc_copysign:
3197   case LibFunc_copysignf:
3198   case LibFunc_copysignl:
3199     return Intrinsic::copysign;
3200   case LibFunc_floor:
3201   case LibFunc_floorf:
3202   case LibFunc_floorl:
3203     return Intrinsic::floor;
3204   case LibFunc_ceil:
3205   case LibFunc_ceilf:
3206   case LibFunc_ceill:
3207     return Intrinsic::ceil;
3208   case LibFunc_trunc:
3209   case LibFunc_truncf:
3210   case LibFunc_truncl:
3211     return Intrinsic::trunc;
3212   case LibFunc_rint:
3213   case LibFunc_rintf:
3214   case LibFunc_rintl:
3215     return Intrinsic::rint;
3216   case LibFunc_nearbyint:
3217   case LibFunc_nearbyintf:
3218   case LibFunc_nearbyintl:
3219     return Intrinsic::nearbyint;
3220   case LibFunc_round:
3221   case LibFunc_roundf:
3222   case LibFunc_roundl:
3223     return Intrinsic::round;
3224   case LibFunc_roundeven:
3225   case LibFunc_roundevenf:
3226   case LibFunc_roundevenl:
3227     return Intrinsic::roundeven;
3228   case LibFunc_pow:
3229   case LibFunc_powf:
3230   case LibFunc_powl:
3231     return Intrinsic::pow;
3232   case LibFunc_sqrt:
3233   case LibFunc_sqrtf:
3234   case LibFunc_sqrtl:
3235     return Intrinsic::sqrt;
3236   }
3237 
3238   return Intrinsic::not_intrinsic;
3239 }
3240 
3241 /// Return true if we can prove that the specified FP value is never equal to
3242 /// -0.0.
3243 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3244 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3245 ///       the same as +0.0 in floating-point ops.
3246 ///
3247 /// NOTE: this function will need to be revisited when we support non-default
3248 /// rounding modes!
3249 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3250                                 unsigned Depth) {
3251   if (auto *CFP = dyn_cast<ConstantFP>(V))
3252     return !CFP->getValueAPF().isNegZero();
3253 
3254   if (Depth == MaxAnalysisRecursionDepth)
3255     return false;
3256 
3257   auto *Op = dyn_cast<Operator>(V);
3258   if (!Op)
3259     return false;
3260 
3261   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3262   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3263     return true;
3264 
3265   // sitofp and uitofp turn into +0.0 for zero.
3266   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3267     return true;
3268 
3269   if (auto *Call = dyn_cast<CallInst>(Op)) {
3270     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3271     switch (IID) {
3272     default:
3273       break;
3274     // sqrt(-0.0) = -0.0, no other negative results are possible.
3275     case Intrinsic::sqrt:
3276     case Intrinsic::canonicalize:
3277       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3278     // fabs(x) != -0.0
3279     case Intrinsic::fabs:
3280       return true;
3281     }
3282   }
3283 
3284   return false;
3285 }
3286 
3287 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3288 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3289 /// bit despite comparing equal.
3290 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3291                                             const TargetLibraryInfo *TLI,
3292                                             bool SignBitOnly,
3293                                             unsigned Depth) {
3294   // TODO: This function does not do the right thing when SignBitOnly is true
3295   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3296   // which flips the sign bits of NaNs.  See
3297   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3298 
3299   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3300     return !CFP->getValueAPF().isNegative() ||
3301            (!SignBitOnly && CFP->getValueAPF().isZero());
3302   }
3303 
3304   // Handle vector of constants.
3305   if (auto *CV = dyn_cast<Constant>(V)) {
3306     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3307       unsigned NumElts = CVFVTy->getNumElements();
3308       for (unsigned i = 0; i != NumElts; ++i) {
3309         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3310         if (!CFP)
3311           return false;
3312         if (CFP->getValueAPF().isNegative() &&
3313             (SignBitOnly || !CFP->getValueAPF().isZero()))
3314           return false;
3315       }
3316 
3317       // All non-negative ConstantFPs.
3318       return true;
3319     }
3320   }
3321 
3322   if (Depth == MaxAnalysisRecursionDepth)
3323     return false;
3324 
3325   const Operator *I = dyn_cast<Operator>(V);
3326   if (!I)
3327     return false;
3328 
3329   switch (I->getOpcode()) {
3330   default:
3331     break;
3332   // Unsigned integers are always nonnegative.
3333   case Instruction::UIToFP:
3334     return true;
3335   case Instruction::FMul:
3336   case Instruction::FDiv:
3337     // X * X is always non-negative or a NaN.
3338     // X / X is always exactly 1.0 or a NaN.
3339     if (I->getOperand(0) == I->getOperand(1) &&
3340         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3341       return true;
3342 
3343     LLVM_FALLTHROUGH;
3344   case Instruction::FAdd:
3345   case Instruction::FRem:
3346     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3347                                            Depth + 1) &&
3348            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3349                                            Depth + 1);
3350   case Instruction::Select:
3351     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3352                                            Depth + 1) &&
3353            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3354                                            Depth + 1);
3355   case Instruction::FPExt:
3356   case Instruction::FPTrunc:
3357     // Widening/narrowing never change sign.
3358     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3359                                            Depth + 1);
3360   case Instruction::ExtractElement:
3361     // Look through extract element. At the moment we keep this simple and skip
3362     // tracking the specific element. But at least we might find information
3363     // valid for all elements of the vector.
3364     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3365                                            Depth + 1);
3366   case Instruction::Call:
3367     const auto *CI = cast<CallInst>(I);
3368     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3369     switch (IID) {
3370     default:
3371       break;
3372     case Intrinsic::maxnum: {
3373       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3374       auto isPositiveNum = [&](Value *V) {
3375         if (SignBitOnly) {
3376           // With SignBitOnly, this is tricky because the result of
3377           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3378           // a constant strictly greater than 0.0.
3379           const APFloat *C;
3380           return match(V, m_APFloat(C)) &&
3381                  *C > APFloat::getZero(C->getSemantics());
3382         }
3383 
3384         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3385         // maxnum can't be ordered-less-than-zero.
3386         return isKnownNeverNaN(V, TLI) &&
3387                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3388       };
3389 
3390       // TODO: This could be improved. We could also check that neither operand
3391       //       has its sign bit set (and at least 1 is not-NAN?).
3392       return isPositiveNum(V0) || isPositiveNum(V1);
3393     }
3394 
3395     case Intrinsic::maximum:
3396       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3397                                              Depth + 1) ||
3398              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3399                                              Depth + 1);
3400     case Intrinsic::minnum:
3401     case Intrinsic::minimum:
3402       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3403                                              Depth + 1) &&
3404              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3405                                              Depth + 1);
3406     case Intrinsic::exp:
3407     case Intrinsic::exp2:
3408     case Intrinsic::fabs:
3409       return true;
3410 
3411     case Intrinsic::sqrt:
3412       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3413       if (!SignBitOnly)
3414         return true;
3415       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3416                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3417 
3418     case Intrinsic::powi:
3419       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3420         // powi(x,n) is non-negative if n is even.
3421         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3422           return true;
3423       }
3424       // TODO: This is not correct.  Given that exp is an integer, here are the
3425       // ways that pow can return a negative value:
3426       //
3427       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3428       //   pow(-0, exp)   --> -inf if exp is negative odd.
3429       //   pow(-0, exp)   --> -0 if exp is positive odd.
3430       //   pow(-inf, exp) --> -0 if exp is negative odd.
3431       //   pow(-inf, exp) --> -inf if exp is positive odd.
3432       //
3433       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3434       // but we must return false if x == -0.  Unfortunately we do not currently
3435       // have a way of expressing this constraint.  See details in
3436       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3437       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3438                                              Depth + 1);
3439 
3440     case Intrinsic::fma:
3441     case Intrinsic::fmuladd:
3442       // x*x+y is non-negative if y is non-negative.
3443       return I->getOperand(0) == I->getOperand(1) &&
3444              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3445              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3446                                              Depth + 1);
3447     }
3448     break;
3449   }
3450   return false;
3451 }
3452 
3453 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3454                                        const TargetLibraryInfo *TLI) {
3455   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3456 }
3457 
3458 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3459   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3460 }
3461 
3462 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3463                                 unsigned Depth) {
3464   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3465 
3466   // If we're told that infinities won't happen, assume they won't.
3467   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3468     if (FPMathOp->hasNoInfs())
3469       return true;
3470 
3471   // Handle scalar constants.
3472   if (auto *CFP = dyn_cast<ConstantFP>(V))
3473     return !CFP->isInfinity();
3474 
3475   if (Depth == MaxAnalysisRecursionDepth)
3476     return false;
3477 
3478   if (auto *Inst = dyn_cast<Instruction>(V)) {
3479     switch (Inst->getOpcode()) {
3480     case Instruction::Select: {
3481       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3482              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3483     }
3484     case Instruction::SIToFP:
3485     case Instruction::UIToFP: {
3486       // Get width of largest magnitude integer (remove a bit if signed).
3487       // This still works for a signed minimum value because the largest FP
3488       // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3489       int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3490       if (Inst->getOpcode() == Instruction::SIToFP)
3491         --IntSize;
3492 
3493       // If the exponent of the largest finite FP value can hold the largest
3494       // integer, the result of the cast must be finite.
3495       Type *FPTy = Inst->getType()->getScalarType();
3496       return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3497     }
3498     default:
3499       break;
3500     }
3501   }
3502 
3503   // try to handle fixed width vector constants
3504   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3505   if (VFVTy && isa<Constant>(V)) {
3506     // For vectors, verify that each element is not infinity.
3507     unsigned NumElts = VFVTy->getNumElements();
3508     for (unsigned i = 0; i != NumElts; ++i) {
3509       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3510       if (!Elt)
3511         return false;
3512       if (isa<UndefValue>(Elt))
3513         continue;
3514       auto *CElt = dyn_cast<ConstantFP>(Elt);
3515       if (!CElt || CElt->isInfinity())
3516         return false;
3517     }
3518     // All elements were confirmed non-infinity or undefined.
3519     return true;
3520   }
3521 
3522   // was not able to prove that V never contains infinity
3523   return false;
3524 }
3525 
3526 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3527                            unsigned Depth) {
3528   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3529 
3530   // If we're told that NaNs won't happen, assume they won't.
3531   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3532     if (FPMathOp->hasNoNaNs())
3533       return true;
3534 
3535   // Handle scalar constants.
3536   if (auto *CFP = dyn_cast<ConstantFP>(V))
3537     return !CFP->isNaN();
3538 
3539   if (Depth == MaxAnalysisRecursionDepth)
3540     return false;
3541 
3542   if (auto *Inst = dyn_cast<Instruction>(V)) {
3543     switch (Inst->getOpcode()) {
3544     case Instruction::FAdd:
3545     case Instruction::FSub:
3546       // Adding positive and negative infinity produces NaN.
3547       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3548              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3549              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3550               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3551 
3552     case Instruction::FMul:
3553       // Zero multiplied with infinity produces NaN.
3554       // FIXME: If neither side can be zero fmul never produces NaN.
3555       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3556              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3557              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3558              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3559 
3560     case Instruction::FDiv:
3561     case Instruction::FRem:
3562       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3563       return false;
3564 
3565     case Instruction::Select: {
3566       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3567              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3568     }
3569     case Instruction::SIToFP:
3570     case Instruction::UIToFP:
3571       return true;
3572     case Instruction::FPTrunc:
3573     case Instruction::FPExt:
3574       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3575     default:
3576       break;
3577     }
3578   }
3579 
3580   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3581     switch (II->getIntrinsicID()) {
3582     case Intrinsic::canonicalize:
3583     case Intrinsic::fabs:
3584     case Intrinsic::copysign:
3585     case Intrinsic::exp:
3586     case Intrinsic::exp2:
3587     case Intrinsic::floor:
3588     case Intrinsic::ceil:
3589     case Intrinsic::trunc:
3590     case Intrinsic::rint:
3591     case Intrinsic::nearbyint:
3592     case Intrinsic::round:
3593     case Intrinsic::roundeven:
3594       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3595     case Intrinsic::sqrt:
3596       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3597              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3598     case Intrinsic::minnum:
3599     case Intrinsic::maxnum:
3600       // If either operand is not NaN, the result is not NaN.
3601       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3602              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3603     default:
3604       return false;
3605     }
3606   }
3607 
3608   // Try to handle fixed width vector constants
3609   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3610   if (VFVTy && isa<Constant>(V)) {
3611     // For vectors, verify that each element is not NaN.
3612     unsigned NumElts = VFVTy->getNumElements();
3613     for (unsigned i = 0; i != NumElts; ++i) {
3614       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3615       if (!Elt)
3616         return false;
3617       if (isa<UndefValue>(Elt))
3618         continue;
3619       auto *CElt = dyn_cast<ConstantFP>(Elt);
3620       if (!CElt || CElt->isNaN())
3621         return false;
3622     }
3623     // All elements were confirmed not-NaN or undefined.
3624     return true;
3625   }
3626 
3627   // Was not able to prove that V never contains NaN
3628   return false;
3629 }
3630 
3631 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3632 
3633   // All byte-wide stores are splatable, even of arbitrary variables.
3634   if (V->getType()->isIntegerTy(8))
3635     return V;
3636 
3637   LLVMContext &Ctx = V->getContext();
3638 
3639   // Undef don't care.
3640   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3641   if (isa<UndefValue>(V))
3642     return UndefInt8;
3643 
3644   // Return Undef for zero-sized type.
3645   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3646     return UndefInt8;
3647 
3648   Constant *C = dyn_cast<Constant>(V);
3649   if (!C) {
3650     // Conceptually, we could handle things like:
3651     //   %a = zext i8 %X to i16
3652     //   %b = shl i16 %a, 8
3653     //   %c = or i16 %a, %b
3654     // but until there is an example that actually needs this, it doesn't seem
3655     // worth worrying about.
3656     return nullptr;
3657   }
3658 
3659   // Handle 'null' ConstantArrayZero etc.
3660   if (C->isNullValue())
3661     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3662 
3663   // Constant floating-point values can be handled as integer values if the
3664   // corresponding integer value is "byteable".  An important case is 0.0.
3665   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3666     Type *Ty = nullptr;
3667     if (CFP->getType()->isHalfTy())
3668       Ty = Type::getInt16Ty(Ctx);
3669     else if (CFP->getType()->isFloatTy())
3670       Ty = Type::getInt32Ty(Ctx);
3671     else if (CFP->getType()->isDoubleTy())
3672       Ty = Type::getInt64Ty(Ctx);
3673     // Don't handle long double formats, which have strange constraints.
3674     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3675               : nullptr;
3676   }
3677 
3678   // We can handle constant integers that are multiple of 8 bits.
3679   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3680     if (CI->getBitWidth() % 8 == 0) {
3681       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3682       if (!CI->getValue().isSplat(8))
3683         return nullptr;
3684       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3685     }
3686   }
3687 
3688   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3689     if (CE->getOpcode() == Instruction::IntToPtr) {
3690       if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3691         unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3692         return isBytewiseValue(
3693             ConstantExpr::getIntegerCast(CE->getOperand(0),
3694                                          Type::getIntNTy(Ctx, BitWidth), false),
3695             DL);
3696       }
3697     }
3698   }
3699 
3700   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3701     if (LHS == RHS)
3702       return LHS;
3703     if (!LHS || !RHS)
3704       return nullptr;
3705     if (LHS == UndefInt8)
3706       return RHS;
3707     if (RHS == UndefInt8)
3708       return LHS;
3709     return nullptr;
3710   };
3711 
3712   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3713     Value *Val = UndefInt8;
3714     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3715       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3716         return nullptr;
3717     return Val;
3718   }
3719 
3720   if (isa<ConstantAggregate>(C)) {
3721     Value *Val = UndefInt8;
3722     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3723       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3724         return nullptr;
3725     return Val;
3726   }
3727 
3728   // Don't try to handle the handful of other constants.
3729   return nullptr;
3730 }
3731 
3732 // This is the recursive version of BuildSubAggregate. It takes a few different
3733 // arguments. Idxs is the index within the nested struct From that we are
3734 // looking at now (which is of type IndexedType). IdxSkip is the number of
3735 // indices from Idxs that should be left out when inserting into the resulting
3736 // struct. To is the result struct built so far, new insertvalue instructions
3737 // build on that.
3738 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3739                                 SmallVectorImpl<unsigned> &Idxs,
3740                                 unsigned IdxSkip,
3741                                 Instruction *InsertBefore) {
3742   StructType *STy = dyn_cast<StructType>(IndexedType);
3743   if (STy) {
3744     // Save the original To argument so we can modify it
3745     Value *OrigTo = To;
3746     // General case, the type indexed by Idxs is a struct
3747     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3748       // Process each struct element recursively
3749       Idxs.push_back(i);
3750       Value *PrevTo = To;
3751       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3752                              InsertBefore);
3753       Idxs.pop_back();
3754       if (!To) {
3755         // Couldn't find any inserted value for this index? Cleanup
3756         while (PrevTo != OrigTo) {
3757           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3758           PrevTo = Del->getAggregateOperand();
3759           Del->eraseFromParent();
3760         }
3761         // Stop processing elements
3762         break;
3763       }
3764     }
3765     // If we successfully found a value for each of our subaggregates
3766     if (To)
3767       return To;
3768   }
3769   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3770   // the struct's elements had a value that was inserted directly. In the latter
3771   // case, perhaps we can't determine each of the subelements individually, but
3772   // we might be able to find the complete struct somewhere.
3773 
3774   // Find the value that is at that particular spot
3775   Value *V = FindInsertedValue(From, Idxs);
3776 
3777   if (!V)
3778     return nullptr;
3779 
3780   // Insert the value in the new (sub) aggregate
3781   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3782                                  "tmp", InsertBefore);
3783 }
3784 
3785 // This helper takes a nested struct and extracts a part of it (which is again a
3786 // struct) into a new value. For example, given the struct:
3787 // { a, { b, { c, d }, e } }
3788 // and the indices "1, 1" this returns
3789 // { c, d }.
3790 //
3791 // It does this by inserting an insertvalue for each element in the resulting
3792 // struct, as opposed to just inserting a single struct. This will only work if
3793 // each of the elements of the substruct are known (ie, inserted into From by an
3794 // insertvalue instruction somewhere).
3795 //
3796 // All inserted insertvalue instructions are inserted before InsertBefore
3797 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3798                                 Instruction *InsertBefore) {
3799   assert(InsertBefore && "Must have someplace to insert!");
3800   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3801                                                              idx_range);
3802   Value *To = UndefValue::get(IndexedType);
3803   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3804   unsigned IdxSkip = Idxs.size();
3805 
3806   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3807 }
3808 
3809 /// Given an aggregate and a sequence of indices, see if the scalar value
3810 /// indexed is already around as a register, for example if it was inserted
3811 /// directly into the aggregate.
3812 ///
3813 /// If InsertBefore is not null, this function will duplicate (modified)
3814 /// insertvalues when a part of a nested struct is extracted.
3815 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3816                                Instruction *InsertBefore) {
3817   // Nothing to index? Just return V then (this is useful at the end of our
3818   // recursion).
3819   if (idx_range.empty())
3820     return V;
3821   // We have indices, so V should have an indexable type.
3822   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3823          "Not looking at a struct or array?");
3824   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3825          "Invalid indices for type?");
3826 
3827   if (Constant *C = dyn_cast<Constant>(V)) {
3828     C = C->getAggregateElement(idx_range[0]);
3829     if (!C) return nullptr;
3830     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3831   }
3832 
3833   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3834     // Loop the indices for the insertvalue instruction in parallel with the
3835     // requested indices
3836     const unsigned *req_idx = idx_range.begin();
3837     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3838          i != e; ++i, ++req_idx) {
3839       if (req_idx == idx_range.end()) {
3840         // We can't handle this without inserting insertvalues
3841         if (!InsertBefore)
3842           return nullptr;
3843 
3844         // The requested index identifies a part of a nested aggregate. Handle
3845         // this specially. For example,
3846         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3847         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3848         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3849         // This can be changed into
3850         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3851         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3852         // which allows the unused 0,0 element from the nested struct to be
3853         // removed.
3854         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3855                                  InsertBefore);
3856       }
3857 
3858       // This insert value inserts something else than what we are looking for.
3859       // See if the (aggregate) value inserted into has the value we are
3860       // looking for, then.
3861       if (*req_idx != *i)
3862         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3863                                  InsertBefore);
3864     }
3865     // If we end up here, the indices of the insertvalue match with those
3866     // requested (though possibly only partially). Now we recursively look at
3867     // the inserted value, passing any remaining indices.
3868     return FindInsertedValue(I->getInsertedValueOperand(),
3869                              makeArrayRef(req_idx, idx_range.end()),
3870                              InsertBefore);
3871   }
3872 
3873   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3874     // If we're extracting a value from an aggregate that was extracted from
3875     // something else, we can extract from that something else directly instead.
3876     // However, we will need to chain I's indices with the requested indices.
3877 
3878     // Calculate the number of indices required
3879     unsigned size = I->getNumIndices() + idx_range.size();
3880     // Allocate some space to put the new indices in
3881     SmallVector<unsigned, 5> Idxs;
3882     Idxs.reserve(size);
3883     // Add indices from the extract value instruction
3884     Idxs.append(I->idx_begin(), I->idx_end());
3885 
3886     // Add requested indices
3887     Idxs.append(idx_range.begin(), idx_range.end());
3888 
3889     assert(Idxs.size() == size
3890            && "Number of indices added not correct?");
3891 
3892     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3893   }
3894   // Otherwise, we don't know (such as, extracting from a function return value
3895   // or load instruction)
3896   return nullptr;
3897 }
3898 
3899 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3900                                        unsigned CharSize) {
3901   // Make sure the GEP has exactly three arguments.
3902   if (GEP->getNumOperands() != 3)
3903     return false;
3904 
3905   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3906   // CharSize.
3907   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3908   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3909     return false;
3910 
3911   // Check to make sure that the first operand of the GEP is an integer and
3912   // has value 0 so that we are sure we're indexing into the initializer.
3913   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3914   if (!FirstIdx || !FirstIdx->isZero())
3915     return false;
3916 
3917   return true;
3918 }
3919 
3920 bool llvm::getConstantDataArrayInfo(const Value *V,
3921                                     ConstantDataArraySlice &Slice,
3922                                     unsigned ElementSize, uint64_t Offset) {
3923   assert(V);
3924 
3925   // Look through bitcast instructions and geps.
3926   V = V->stripPointerCasts();
3927 
3928   // If the value is a GEP instruction or constant expression, treat it as an
3929   // offset.
3930   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3931     // The GEP operator should be based on a pointer to string constant, and is
3932     // indexing into the string constant.
3933     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3934       return false;
3935 
3936     // If the second index isn't a ConstantInt, then this is a variable index
3937     // into the array.  If this occurs, we can't say anything meaningful about
3938     // the string.
3939     uint64_t StartIdx = 0;
3940     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3941       StartIdx = CI->getZExtValue();
3942     else
3943       return false;
3944     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3945                                     StartIdx + Offset);
3946   }
3947 
3948   // The GEP instruction, constant or instruction, must reference a global
3949   // variable that is a constant and is initialized. The referenced constant
3950   // initializer is the array that we'll use for optimization.
3951   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3952   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3953     return false;
3954 
3955   const ConstantDataArray *Array;
3956   ArrayType *ArrayTy;
3957   if (GV->getInitializer()->isNullValue()) {
3958     Type *GVTy = GV->getValueType();
3959     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3960       // A zeroinitializer for the array; there is no ConstantDataArray.
3961       Array = nullptr;
3962     } else {
3963       const DataLayout &DL = GV->getParent()->getDataLayout();
3964       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
3965       uint64_t Length = SizeInBytes / (ElementSize / 8);
3966       if (Length <= Offset)
3967         return false;
3968 
3969       Slice.Array = nullptr;
3970       Slice.Offset = 0;
3971       Slice.Length = Length - Offset;
3972       return true;
3973     }
3974   } else {
3975     // This must be a ConstantDataArray.
3976     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3977     if (!Array)
3978       return false;
3979     ArrayTy = Array->getType();
3980   }
3981   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3982     return false;
3983 
3984   uint64_t NumElts = ArrayTy->getArrayNumElements();
3985   if (Offset > NumElts)
3986     return false;
3987 
3988   Slice.Array = Array;
3989   Slice.Offset = Offset;
3990   Slice.Length = NumElts - Offset;
3991   return true;
3992 }
3993 
3994 /// This function computes the length of a null-terminated C string pointed to
3995 /// by V. If successful, it returns true and returns the string in Str.
3996 /// If unsuccessful, it returns false.
3997 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3998                                  uint64_t Offset, bool TrimAtNul) {
3999   ConstantDataArraySlice Slice;
4000   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4001     return false;
4002 
4003   if (Slice.Array == nullptr) {
4004     if (TrimAtNul) {
4005       Str = StringRef();
4006       return true;
4007     }
4008     if (Slice.Length == 1) {
4009       Str = StringRef("", 1);
4010       return true;
4011     }
4012     // We cannot instantiate a StringRef as we do not have an appropriate string
4013     // of 0s at hand.
4014     return false;
4015   }
4016 
4017   // Start out with the entire array in the StringRef.
4018   Str = Slice.Array->getAsString();
4019   // Skip over 'offset' bytes.
4020   Str = Str.substr(Slice.Offset);
4021 
4022   if (TrimAtNul) {
4023     // Trim off the \0 and anything after it.  If the array is not nul
4024     // terminated, we just return the whole end of string.  The client may know
4025     // some other way that the string is length-bound.
4026     Str = Str.substr(0, Str.find('\0'));
4027   }
4028   return true;
4029 }
4030 
4031 // These next two are very similar to the above, but also look through PHI
4032 // nodes.
4033 // TODO: See if we can integrate these two together.
4034 
4035 /// If we can compute the length of the string pointed to by
4036 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4037 static uint64_t GetStringLengthH(const Value *V,
4038                                  SmallPtrSetImpl<const PHINode*> &PHIs,
4039                                  unsigned CharSize) {
4040   // Look through noop bitcast instructions.
4041   V = V->stripPointerCasts();
4042 
4043   // If this is a PHI node, there are two cases: either we have already seen it
4044   // or we haven't.
4045   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4046     if (!PHIs.insert(PN).second)
4047       return ~0ULL;  // already in the set.
4048 
4049     // If it was new, see if all the input strings are the same length.
4050     uint64_t LenSoFar = ~0ULL;
4051     for (Value *IncValue : PN->incoming_values()) {
4052       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4053       if (Len == 0) return 0; // Unknown length -> unknown.
4054 
4055       if (Len == ~0ULL) continue;
4056 
4057       if (Len != LenSoFar && LenSoFar != ~0ULL)
4058         return 0;    // Disagree -> unknown.
4059       LenSoFar = Len;
4060     }
4061 
4062     // Success, all agree.
4063     return LenSoFar;
4064   }
4065 
4066   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4067   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4068     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4069     if (Len1 == 0) return 0;
4070     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4071     if (Len2 == 0) return 0;
4072     if (Len1 == ~0ULL) return Len2;
4073     if (Len2 == ~0ULL) return Len1;
4074     if (Len1 != Len2) return 0;
4075     return Len1;
4076   }
4077 
4078   // Otherwise, see if we can read the string.
4079   ConstantDataArraySlice Slice;
4080   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4081     return 0;
4082 
4083   if (Slice.Array == nullptr)
4084     return 1;
4085 
4086   // Search for nul characters
4087   unsigned NullIndex = 0;
4088   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4089     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4090       break;
4091   }
4092 
4093   return NullIndex + 1;
4094 }
4095 
4096 /// If we can compute the length of the string pointed to by
4097 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4098 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4099   if (!V->getType()->isPointerTy())
4100     return 0;
4101 
4102   SmallPtrSet<const PHINode*, 32> PHIs;
4103   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4104   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4105   // an empty string as a length.
4106   return Len == ~0ULL ? 1 : Len;
4107 }
4108 
4109 const Value *
4110 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4111                                            bool MustPreserveNullness) {
4112   assert(Call &&
4113          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4114   if (const Value *RV = Call->getReturnedArgOperand())
4115     return RV;
4116   // This can be used only as a aliasing property.
4117   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4118           Call, MustPreserveNullness))
4119     return Call->getArgOperand(0);
4120   return nullptr;
4121 }
4122 
4123 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4124     const CallBase *Call, bool MustPreserveNullness) {
4125   switch (Call->getIntrinsicID()) {
4126   case Intrinsic::launder_invariant_group:
4127   case Intrinsic::strip_invariant_group:
4128   case Intrinsic::aarch64_irg:
4129   case Intrinsic::aarch64_tagp:
4130     return true;
4131   case Intrinsic::ptrmask:
4132     return !MustPreserveNullness;
4133   default:
4134     return false;
4135   }
4136 }
4137 
4138 /// \p PN defines a loop-variant pointer to an object.  Check if the
4139 /// previous iteration of the loop was referring to the same object as \p PN.
4140 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4141                                          const LoopInfo *LI) {
4142   // Find the loop-defined value.
4143   Loop *L = LI->getLoopFor(PN->getParent());
4144   if (PN->getNumIncomingValues() != 2)
4145     return true;
4146 
4147   // Find the value from previous iteration.
4148   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4149   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4150     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4151   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4152     return true;
4153 
4154   // If a new pointer is loaded in the loop, the pointer references a different
4155   // object in every iteration.  E.g.:
4156   //    for (i)
4157   //       int *p = a[i];
4158   //       ...
4159   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4160     if (!L->isLoopInvariant(Load->getPointerOperand()))
4161       return false;
4162   return true;
4163 }
4164 
4165 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4166   if (!V->getType()->isPointerTy())
4167     return V;
4168   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4169     if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4170       V = GEP->getPointerOperand();
4171     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4172                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4173       V = cast<Operator>(V)->getOperand(0);
4174       if (!V->getType()->isPointerTy())
4175         return V;
4176     } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4177       if (GA->isInterposable())
4178         return V;
4179       V = GA->getAliasee();
4180     } else {
4181       if (auto *PHI = dyn_cast<PHINode>(V)) {
4182         // Look through single-arg phi nodes created by LCSSA.
4183         if (PHI->getNumIncomingValues() == 1) {
4184           V = PHI->getIncomingValue(0);
4185           continue;
4186         }
4187       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4188         // CaptureTracking can know about special capturing properties of some
4189         // intrinsics like launder.invariant.group, that can't be expressed with
4190         // the attributes, but have properties like returning aliasing pointer.
4191         // Because some analysis may assume that nocaptured pointer is not
4192         // returned from some special intrinsic (because function would have to
4193         // be marked with returns attribute), it is crucial to use this function
4194         // because it should be in sync with CaptureTracking. Not using it may
4195         // cause weird miscompilations where 2 aliasing pointers are assumed to
4196         // noalias.
4197         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4198           V = RP;
4199           continue;
4200         }
4201       }
4202 
4203       return V;
4204     }
4205     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4206   }
4207   return V;
4208 }
4209 
4210 void llvm::getUnderlyingObjects(const Value *V,
4211                                 SmallVectorImpl<const Value *> &Objects,
4212                                 LoopInfo *LI, unsigned MaxLookup) {
4213   SmallPtrSet<const Value *, 4> Visited;
4214   SmallVector<const Value *, 4> Worklist;
4215   Worklist.push_back(V);
4216   do {
4217     const Value *P = Worklist.pop_back_val();
4218     P = getUnderlyingObject(P, MaxLookup);
4219 
4220     if (!Visited.insert(P).second)
4221       continue;
4222 
4223     if (auto *SI = dyn_cast<SelectInst>(P)) {
4224       Worklist.push_back(SI->getTrueValue());
4225       Worklist.push_back(SI->getFalseValue());
4226       continue;
4227     }
4228 
4229     if (auto *PN = dyn_cast<PHINode>(P)) {
4230       // If this PHI changes the underlying object in every iteration of the
4231       // loop, don't look through it.  Consider:
4232       //   int **A;
4233       //   for (i) {
4234       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4235       //     Curr = A[i];
4236       //     *Prev, *Curr;
4237       //
4238       // Prev is tracking Curr one iteration behind so they refer to different
4239       // underlying objects.
4240       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4241           isSameUnderlyingObjectInLoop(PN, LI))
4242         append_range(Worklist, PN->incoming_values());
4243       continue;
4244     }
4245 
4246     Objects.push_back(P);
4247   } while (!Worklist.empty());
4248 }
4249 
4250 /// This is the function that does the work of looking through basic
4251 /// ptrtoint+arithmetic+inttoptr sequences.
4252 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4253   do {
4254     if (const Operator *U = dyn_cast<Operator>(V)) {
4255       // If we find a ptrtoint, we can transfer control back to the
4256       // regular getUnderlyingObjectFromInt.
4257       if (U->getOpcode() == Instruction::PtrToInt)
4258         return U->getOperand(0);
4259       // If we find an add of a constant, a multiplied value, or a phi, it's
4260       // likely that the other operand will lead us to the base
4261       // object. We don't have to worry about the case where the
4262       // object address is somehow being computed by the multiply,
4263       // because our callers only care when the result is an
4264       // identifiable object.
4265       if (U->getOpcode() != Instruction::Add ||
4266           (!isa<ConstantInt>(U->getOperand(1)) &&
4267            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4268            !isa<PHINode>(U->getOperand(1))))
4269         return V;
4270       V = U->getOperand(0);
4271     } else {
4272       return V;
4273     }
4274     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4275   } while (true);
4276 }
4277 
4278 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4279 /// ptrtoint+arithmetic+inttoptr sequences.
4280 /// It returns false if unidentified object is found in getUnderlyingObjects.
4281 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4282                                           SmallVectorImpl<Value *> &Objects) {
4283   SmallPtrSet<const Value *, 16> Visited;
4284   SmallVector<const Value *, 4> Working(1, V);
4285   do {
4286     V = Working.pop_back_val();
4287 
4288     SmallVector<const Value *, 4> Objs;
4289     getUnderlyingObjects(V, Objs);
4290 
4291     for (const Value *V : Objs) {
4292       if (!Visited.insert(V).second)
4293         continue;
4294       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4295         const Value *O =
4296           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4297         if (O->getType()->isPointerTy()) {
4298           Working.push_back(O);
4299           continue;
4300         }
4301       }
4302       // If getUnderlyingObjects fails to find an identifiable object,
4303       // getUnderlyingObjectsForCodeGen also fails for safety.
4304       if (!isIdentifiedObject(V)) {
4305         Objects.clear();
4306         return false;
4307       }
4308       Objects.push_back(const_cast<Value *>(V));
4309     }
4310   } while (!Working.empty());
4311   return true;
4312 }
4313 
4314 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4315   AllocaInst *Result = nullptr;
4316   SmallPtrSet<Value *, 4> Visited;
4317   SmallVector<Value *, 4> Worklist;
4318 
4319   auto AddWork = [&](Value *V) {
4320     if (Visited.insert(V).second)
4321       Worklist.push_back(V);
4322   };
4323 
4324   AddWork(V);
4325   do {
4326     V = Worklist.pop_back_val();
4327     assert(Visited.count(V));
4328 
4329     if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4330       if (Result && Result != AI)
4331         return nullptr;
4332       Result = AI;
4333     } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4334       AddWork(CI->getOperand(0));
4335     } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4336       for (Value *IncValue : PN->incoming_values())
4337         AddWork(IncValue);
4338     } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4339       AddWork(SI->getTrueValue());
4340       AddWork(SI->getFalseValue());
4341     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4342       if (OffsetZero && !GEP->hasAllZeroIndices())
4343         return nullptr;
4344       AddWork(GEP->getPointerOperand());
4345     } else {
4346       return nullptr;
4347     }
4348   } while (!Worklist.empty());
4349 
4350   return Result;
4351 }
4352 
4353 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4354     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4355   for (const User *U : V->users()) {
4356     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4357     if (!II)
4358       return false;
4359 
4360     if (AllowLifetime && II->isLifetimeStartOrEnd())
4361       continue;
4362 
4363     if (AllowDroppable && II->isDroppable())
4364       continue;
4365 
4366     return false;
4367   }
4368   return true;
4369 }
4370 
4371 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4372   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4373       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4374 }
4375 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4376   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4377       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4378 }
4379 
4380 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4381   if (!LI.isUnordered())
4382     return true;
4383   const Function &F = *LI.getFunction();
4384   // Speculative load may create a race that did not exist in the source.
4385   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4386     // Speculative load may load data from dirty regions.
4387     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4388     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4389 }
4390 
4391 
4392 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4393                                         const Instruction *CtxI,
4394                                         const DominatorTree *DT) {
4395   const Operator *Inst = dyn_cast<Operator>(V);
4396   if (!Inst)
4397     return false;
4398 
4399   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4400     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4401       if (C->canTrap())
4402         return false;
4403 
4404   switch (Inst->getOpcode()) {
4405   default:
4406     return true;
4407   case Instruction::UDiv:
4408   case Instruction::URem: {
4409     // x / y is undefined if y == 0.
4410     const APInt *V;
4411     if (match(Inst->getOperand(1), m_APInt(V)))
4412       return *V != 0;
4413     return false;
4414   }
4415   case Instruction::SDiv:
4416   case Instruction::SRem: {
4417     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4418     const APInt *Numerator, *Denominator;
4419     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4420       return false;
4421     // We cannot hoist this division if the denominator is 0.
4422     if (*Denominator == 0)
4423       return false;
4424     // It's safe to hoist if the denominator is not 0 or -1.
4425     if (!Denominator->isAllOnesValue())
4426       return true;
4427     // At this point we know that the denominator is -1.  It is safe to hoist as
4428     // long we know that the numerator is not INT_MIN.
4429     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4430       return !Numerator->isMinSignedValue();
4431     // The numerator *might* be MinSignedValue.
4432     return false;
4433   }
4434   case Instruction::Load: {
4435     const LoadInst *LI = cast<LoadInst>(Inst);
4436     if (mustSuppressSpeculation(*LI))
4437       return false;
4438     const DataLayout &DL = LI->getModule()->getDataLayout();
4439     return isDereferenceableAndAlignedPointer(
4440         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4441         DL, CtxI, DT);
4442   }
4443   case Instruction::Call: {
4444     auto *CI = cast<const CallInst>(Inst);
4445     const Function *Callee = CI->getCalledFunction();
4446 
4447     // The called function could have undefined behavior or side-effects, even
4448     // if marked readnone nounwind.
4449     return Callee && Callee->isSpeculatable();
4450   }
4451   case Instruction::VAArg:
4452   case Instruction::Alloca:
4453   case Instruction::Invoke:
4454   case Instruction::CallBr:
4455   case Instruction::PHI:
4456   case Instruction::Store:
4457   case Instruction::Ret:
4458   case Instruction::Br:
4459   case Instruction::IndirectBr:
4460   case Instruction::Switch:
4461   case Instruction::Unreachable:
4462   case Instruction::Fence:
4463   case Instruction::AtomicRMW:
4464   case Instruction::AtomicCmpXchg:
4465   case Instruction::LandingPad:
4466   case Instruction::Resume:
4467   case Instruction::CatchSwitch:
4468   case Instruction::CatchPad:
4469   case Instruction::CatchRet:
4470   case Instruction::CleanupPad:
4471   case Instruction::CleanupRet:
4472     return false; // Misc instructions which have effects
4473   }
4474 }
4475 
4476 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4477   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4478 }
4479 
4480 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4481 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4482   switch (OR) {
4483     case ConstantRange::OverflowResult::MayOverflow:
4484       return OverflowResult::MayOverflow;
4485     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4486       return OverflowResult::AlwaysOverflowsLow;
4487     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4488       return OverflowResult::AlwaysOverflowsHigh;
4489     case ConstantRange::OverflowResult::NeverOverflows:
4490       return OverflowResult::NeverOverflows;
4491   }
4492   llvm_unreachable("Unknown OverflowResult");
4493 }
4494 
4495 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4496 static ConstantRange computeConstantRangeIncludingKnownBits(
4497     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4498     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4499     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4500   KnownBits Known = computeKnownBits(
4501       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4502   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4503   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4504   ConstantRange::PreferredRangeType RangeType =
4505       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4506   return CR1.intersectWith(CR2, RangeType);
4507 }
4508 
4509 OverflowResult llvm::computeOverflowForUnsignedMul(
4510     const Value *LHS, const Value *RHS, const DataLayout &DL,
4511     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4512     bool UseInstrInfo) {
4513   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4514                                         nullptr, UseInstrInfo);
4515   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4516                                         nullptr, UseInstrInfo);
4517   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4518   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4519   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4520 }
4521 
4522 OverflowResult
4523 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4524                                   const DataLayout &DL, AssumptionCache *AC,
4525                                   const Instruction *CxtI,
4526                                   const DominatorTree *DT, bool UseInstrInfo) {
4527   // Multiplying n * m significant bits yields a result of n + m significant
4528   // bits. If the total number of significant bits does not exceed the
4529   // result bit width (minus 1), there is no overflow.
4530   // This means if we have enough leading sign bits in the operands
4531   // we can guarantee that the result does not overflow.
4532   // Ref: "Hacker's Delight" by Henry Warren
4533   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4534 
4535   // Note that underestimating the number of sign bits gives a more
4536   // conservative answer.
4537   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4538                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4539 
4540   // First handle the easy case: if we have enough sign bits there's
4541   // definitely no overflow.
4542   if (SignBits > BitWidth + 1)
4543     return OverflowResult::NeverOverflows;
4544 
4545   // There are two ambiguous cases where there can be no overflow:
4546   //   SignBits == BitWidth + 1    and
4547   //   SignBits == BitWidth
4548   // The second case is difficult to check, therefore we only handle the
4549   // first case.
4550   if (SignBits == BitWidth + 1) {
4551     // It overflows only when both arguments are negative and the true
4552     // product is exactly the minimum negative number.
4553     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4554     // For simplicity we just check if at least one side is not negative.
4555     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4556                                           nullptr, UseInstrInfo);
4557     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4558                                           nullptr, UseInstrInfo);
4559     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4560       return OverflowResult::NeverOverflows;
4561   }
4562   return OverflowResult::MayOverflow;
4563 }
4564 
4565 OverflowResult llvm::computeOverflowForUnsignedAdd(
4566     const Value *LHS, const Value *RHS, const DataLayout &DL,
4567     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4568     bool UseInstrInfo) {
4569   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4570       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4571       nullptr, UseInstrInfo);
4572   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4573       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4574       nullptr, UseInstrInfo);
4575   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4576 }
4577 
4578 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4579                                                   const Value *RHS,
4580                                                   const AddOperator *Add,
4581                                                   const DataLayout &DL,
4582                                                   AssumptionCache *AC,
4583                                                   const Instruction *CxtI,
4584                                                   const DominatorTree *DT) {
4585   if (Add && Add->hasNoSignedWrap()) {
4586     return OverflowResult::NeverOverflows;
4587   }
4588 
4589   // If LHS and RHS each have at least two sign bits, the addition will look
4590   // like
4591   //
4592   // XX..... +
4593   // YY.....
4594   //
4595   // If the carry into the most significant position is 0, X and Y can't both
4596   // be 1 and therefore the carry out of the addition is also 0.
4597   //
4598   // If the carry into the most significant position is 1, X and Y can't both
4599   // be 0 and therefore the carry out of the addition is also 1.
4600   //
4601   // Since the carry into the most significant position is always equal to
4602   // the carry out of the addition, there is no signed overflow.
4603   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4604       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4605     return OverflowResult::NeverOverflows;
4606 
4607   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4608       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4609   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4610       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4611   OverflowResult OR =
4612       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4613   if (OR != OverflowResult::MayOverflow)
4614     return OR;
4615 
4616   // The remaining code needs Add to be available. Early returns if not so.
4617   if (!Add)
4618     return OverflowResult::MayOverflow;
4619 
4620   // If the sign of Add is the same as at least one of the operands, this add
4621   // CANNOT overflow. If this can be determined from the known bits of the
4622   // operands the above signedAddMayOverflow() check will have already done so.
4623   // The only other way to improve on the known bits is from an assumption, so
4624   // call computeKnownBitsFromAssume() directly.
4625   bool LHSOrRHSKnownNonNegative =
4626       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4627   bool LHSOrRHSKnownNegative =
4628       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4629   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4630     KnownBits AddKnown(LHSRange.getBitWidth());
4631     computeKnownBitsFromAssume(
4632         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4633     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4634         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4635       return OverflowResult::NeverOverflows;
4636   }
4637 
4638   return OverflowResult::MayOverflow;
4639 }
4640 
4641 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4642                                                    const Value *RHS,
4643                                                    const DataLayout &DL,
4644                                                    AssumptionCache *AC,
4645                                                    const Instruction *CxtI,
4646                                                    const DominatorTree *DT) {
4647   // Checking for conditions implied by dominating conditions may be expensive.
4648   // Limit it to usub_with_overflow calls for now.
4649   if (match(CxtI,
4650             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4651     if (auto C =
4652             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4653       if (*C)
4654         return OverflowResult::NeverOverflows;
4655       return OverflowResult::AlwaysOverflowsLow;
4656     }
4657   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4658       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4659   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4660       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4661   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4662 }
4663 
4664 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4665                                                  const Value *RHS,
4666                                                  const DataLayout &DL,
4667                                                  AssumptionCache *AC,
4668                                                  const Instruction *CxtI,
4669                                                  const DominatorTree *DT) {
4670   // If LHS and RHS each have at least two sign bits, the subtraction
4671   // cannot overflow.
4672   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4673       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4674     return OverflowResult::NeverOverflows;
4675 
4676   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4677       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4678   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4679       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4680   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4681 }
4682 
4683 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4684                                      const DominatorTree &DT) {
4685   SmallVector<const BranchInst *, 2> GuardingBranches;
4686   SmallVector<const ExtractValueInst *, 2> Results;
4687 
4688   for (const User *U : WO->users()) {
4689     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4690       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4691 
4692       if (EVI->getIndices()[0] == 0)
4693         Results.push_back(EVI);
4694       else {
4695         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4696 
4697         for (const auto *U : EVI->users())
4698           if (const auto *B = dyn_cast<BranchInst>(U)) {
4699             assert(B->isConditional() && "How else is it using an i1?");
4700             GuardingBranches.push_back(B);
4701           }
4702       }
4703     } else {
4704       // We are using the aggregate directly in a way we don't want to analyze
4705       // here (storing it to a global, say).
4706       return false;
4707     }
4708   }
4709 
4710   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4711     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4712     if (!NoWrapEdge.isSingleEdge())
4713       return false;
4714 
4715     // Check if all users of the add are provably no-wrap.
4716     for (const auto *Result : Results) {
4717       // If the extractvalue itself is not executed on overflow, the we don't
4718       // need to check each use separately, since domination is transitive.
4719       if (DT.dominates(NoWrapEdge, Result->getParent()))
4720         continue;
4721 
4722       for (auto &RU : Result->uses())
4723         if (!DT.dominates(NoWrapEdge, RU))
4724           return false;
4725     }
4726 
4727     return true;
4728   };
4729 
4730   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4731 }
4732 
4733 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4734   // See whether I has flags that may create poison
4735   if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4736     if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4737       return true;
4738   }
4739   if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4740     if (ExactOp->isExact())
4741       return true;
4742   if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4743     auto FMF = FP->getFastMathFlags();
4744     if (FMF.noNaNs() || FMF.noInfs())
4745       return true;
4746   }
4747 
4748   unsigned Opcode = Op->getOpcode();
4749 
4750   // Check whether opcode is a poison/undef-generating operation
4751   switch (Opcode) {
4752   case Instruction::Shl:
4753   case Instruction::AShr:
4754   case Instruction::LShr: {
4755     // Shifts return poison if shiftwidth is larger than the bitwidth.
4756     if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4757       SmallVector<Constant *, 4> ShiftAmounts;
4758       if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4759         unsigned NumElts = FVTy->getNumElements();
4760         for (unsigned i = 0; i < NumElts; ++i)
4761           ShiftAmounts.push_back(C->getAggregateElement(i));
4762       } else if (isa<ScalableVectorType>(C->getType()))
4763         return true; // Can't tell, just return true to be safe
4764       else
4765         ShiftAmounts.push_back(C);
4766 
4767       bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4768         auto *CI = dyn_cast_or_null<ConstantInt>(C);
4769         return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4770       });
4771       return !Safe;
4772     }
4773     return true;
4774   }
4775   case Instruction::FPToSI:
4776   case Instruction::FPToUI:
4777     // fptosi/ui yields poison if the resulting value does not fit in the
4778     // destination type.
4779     return true;
4780   case Instruction::Call:
4781   case Instruction::CallBr:
4782   case Instruction::Invoke: {
4783     const auto *CB = cast<CallBase>(Op);
4784     return !CB->hasRetAttr(Attribute::NoUndef);
4785   }
4786   case Instruction::InsertElement:
4787   case Instruction::ExtractElement: {
4788     // If index exceeds the length of the vector, it returns poison
4789     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4790     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4791     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4792     if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
4793       return true;
4794     return false;
4795   }
4796   case Instruction::ShuffleVector: {
4797     // shufflevector may return undef.
4798     if (PoisonOnly)
4799       return false;
4800     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4801                              ? cast<ConstantExpr>(Op)->getShuffleMask()
4802                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4803     return is_contained(Mask, UndefMaskElem);
4804   }
4805   case Instruction::FNeg:
4806   case Instruction::PHI:
4807   case Instruction::Select:
4808   case Instruction::URem:
4809   case Instruction::SRem:
4810   case Instruction::ExtractValue:
4811   case Instruction::InsertValue:
4812   case Instruction::Freeze:
4813   case Instruction::ICmp:
4814   case Instruction::FCmp:
4815     return false;
4816   case Instruction::GetElementPtr: {
4817     const auto *GEP = cast<GEPOperator>(Op);
4818     return GEP->isInBounds();
4819   }
4820   default: {
4821     const auto *CE = dyn_cast<ConstantExpr>(Op);
4822     if (isa<CastInst>(Op) || (CE && CE->isCast()))
4823       return false;
4824     else if (Instruction::isBinaryOp(Opcode))
4825       return false;
4826     // Be conservative and return true.
4827     return true;
4828   }
4829   }
4830 }
4831 
4832 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
4833   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
4834 }
4835 
4836 bool llvm::canCreatePoison(const Operator *Op) {
4837   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
4838 }
4839 
4840 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
4841                                   const Value *V, unsigned Depth) {
4842   if (ValAssumedPoison == V)
4843     return true;
4844 
4845   const unsigned MaxDepth = 2;
4846   if (Depth >= MaxDepth)
4847     return false;
4848 
4849   if (const auto *I = dyn_cast<Instruction>(V)) {
4850     if (propagatesPoison(cast<Operator>(I)))
4851       return any_of(I->operands(), [=](const Value *Op) {
4852         return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
4853       });
4854 
4855     // 'select ValAssumedPoison, _, _' is poison.
4856     if (const auto *SI = dyn_cast<SelectInst>(I))
4857       return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
4858                                    Depth + 1);
4859     // V  = extractvalue V0, idx
4860     // V2 = extractvalue V0, idx2
4861     // V0's elements are all poison or not. (e.g., add_with_overflow)
4862     const WithOverflowInst *II;
4863     if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
4864         match(ValAssumedPoison, m_ExtractValue(m_Specific(II))))
4865       return true;
4866   }
4867   return false;
4868 }
4869 
4870 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
4871                           unsigned Depth) {
4872   if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
4873     return true;
4874 
4875   if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
4876     return true;
4877 
4878   const unsigned MaxDepth = 2;
4879   if (Depth >= MaxDepth)
4880     return false;
4881 
4882   const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
4883   if (I && !canCreatePoison(cast<Operator>(I))) {
4884     return all_of(I->operands(), [=](const Value *Op) {
4885       return impliesPoison(Op, V, Depth + 1);
4886     });
4887   }
4888   return false;
4889 }
4890 
4891 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
4892   return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
4893 }
4894 
4895 static bool programUndefinedIfUndefOrPoison(const Value *V,
4896                                             bool PoisonOnly);
4897 
4898 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
4899                                              AssumptionCache *AC,
4900                                              const Instruction *CtxI,
4901                                              const DominatorTree *DT,
4902                                              unsigned Depth, bool PoisonOnly) {
4903   if (Depth >= MaxAnalysisRecursionDepth)
4904     return false;
4905 
4906   if (isa<MetadataAsValue>(V))
4907     return false;
4908 
4909   if (const auto *A = dyn_cast<Argument>(V)) {
4910     if (A->hasAttribute(Attribute::NoUndef))
4911       return true;
4912   }
4913 
4914   if (auto *C = dyn_cast<Constant>(V)) {
4915     if (isa<UndefValue>(C))
4916       return PoisonOnly && !isa<PoisonValue>(C);
4917 
4918     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
4919         isa<ConstantPointerNull>(C) || isa<Function>(C))
4920       return true;
4921 
4922     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
4923       return (PoisonOnly ? !C->containsPoisonElement()
4924                          : !C->containsUndefOrPoisonElement()) &&
4925              !C->containsConstantExpression();
4926   }
4927 
4928   // Strip cast operations from a pointer value.
4929   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
4930   // inbounds with zero offset. To guarantee that the result isn't poison, the
4931   // stripped pointer is checked as it has to be pointing into an allocated
4932   // object or be null `null` to ensure `inbounds` getelement pointers with a
4933   // zero offset could not produce poison.
4934   // It can strip off addrspacecast that do not change bit representation as
4935   // well. We believe that such addrspacecast is equivalent to no-op.
4936   auto *StrippedV = V->stripPointerCastsSameRepresentation();
4937   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
4938       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
4939     return true;
4940 
4941   auto OpCheck = [&](const Value *V) {
4942     return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
4943                                             PoisonOnly);
4944   };
4945 
4946   if (auto *Opr = dyn_cast<Operator>(V)) {
4947     // If the value is a freeze instruction, then it can never
4948     // be undef or poison.
4949     if (isa<FreezeInst>(V))
4950       return true;
4951 
4952     if (const auto *CB = dyn_cast<CallBase>(V)) {
4953       if (CB->hasRetAttr(Attribute::NoUndef))
4954         return true;
4955     }
4956 
4957     if (const auto *PN = dyn_cast<PHINode>(V)) {
4958       unsigned Num = PN->getNumIncomingValues();
4959       bool IsWellDefined = true;
4960       for (unsigned i = 0; i < Num; ++i) {
4961         auto *TI = PN->getIncomingBlock(i)->getTerminator();
4962         if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
4963                                               DT, Depth + 1, PoisonOnly)) {
4964           IsWellDefined = false;
4965           break;
4966         }
4967       }
4968       if (IsWellDefined)
4969         return true;
4970     } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
4971       return true;
4972   }
4973 
4974   if (auto *I = dyn_cast<LoadInst>(V))
4975     if (I->getMetadata(LLVMContext::MD_noundef))
4976       return true;
4977 
4978   if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
4979     return true;
4980 
4981   // CxtI may be null or a cloned instruction.
4982   if (!CtxI || !CtxI->getParent() || !DT)
4983     return false;
4984 
4985   auto *DNode = DT->getNode(CtxI->getParent());
4986   if (!DNode)
4987     // Unreachable block
4988     return false;
4989 
4990   // If V is used as a branch condition before reaching CtxI, V cannot be
4991   // undef or poison.
4992   //   br V, BB1, BB2
4993   // BB1:
4994   //   CtxI ; V cannot be undef or poison here
4995   auto *Dominator = DNode->getIDom();
4996   while (Dominator) {
4997     auto *TI = Dominator->getBlock()->getTerminator();
4998 
4999     Value *Cond = nullptr;
5000     if (auto BI = dyn_cast<BranchInst>(TI)) {
5001       if (BI->isConditional())
5002         Cond = BI->getCondition();
5003     } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
5004       Cond = SI->getCondition();
5005     }
5006 
5007     if (Cond) {
5008       if (Cond == V)
5009         return true;
5010       else if (PoisonOnly && isa<Operator>(Cond)) {
5011         // For poison, we can analyze further
5012         auto *Opr = cast<Operator>(Cond);
5013         if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5014           return true;
5015       }
5016     }
5017 
5018     Dominator = Dominator->getIDom();
5019   }
5020 
5021   SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
5022   if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
5023     return true;
5024 
5025   return false;
5026 }
5027 
5028 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5029                                             const Instruction *CtxI,
5030                                             const DominatorTree *DT,
5031                                             unsigned Depth) {
5032   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5033 }
5034 
5035 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5036                                      const Instruction *CtxI,
5037                                      const DominatorTree *DT, unsigned Depth) {
5038   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5039 }
5040 
5041 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5042                                                  const DataLayout &DL,
5043                                                  AssumptionCache *AC,
5044                                                  const Instruction *CxtI,
5045                                                  const DominatorTree *DT) {
5046   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5047                                        Add, DL, AC, CxtI, DT);
5048 }
5049 
5050 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5051                                                  const Value *RHS,
5052                                                  const DataLayout &DL,
5053                                                  AssumptionCache *AC,
5054                                                  const Instruction *CxtI,
5055                                                  const DominatorTree *DT) {
5056   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5057 }
5058 
5059 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5060   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5061   // of time because it's possible for another thread to interfere with it for an
5062   // arbitrary length of time, but programs aren't allowed to rely on that.
5063 
5064   // If there is no successor, then execution can't transfer to it.
5065   if (isa<ReturnInst>(I))
5066     return false;
5067   if (isa<UnreachableInst>(I))
5068     return false;
5069 
5070   // An instruction that returns without throwing must transfer control flow
5071   // to a successor.
5072   return !I->mayThrow() && I->willReturn();
5073 }
5074 
5075 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5076   // TODO: This is slightly conservative for invoke instruction since exiting
5077   // via an exception *is* normal control for them.
5078   for (const Instruction &I : *BB)
5079     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5080       return false;
5081   return true;
5082 }
5083 
5084 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5085                                                   const Loop *L) {
5086   // The loop header is guaranteed to be executed for every iteration.
5087   //
5088   // FIXME: Relax this constraint to cover all basic blocks that are
5089   // guaranteed to be executed at every iteration.
5090   if (I->getParent() != L->getHeader()) return false;
5091 
5092   for (const Instruction &LI : *L->getHeader()) {
5093     if (&LI == I) return true;
5094     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5095   }
5096   llvm_unreachable("Instruction not contained in its own parent basic block.");
5097 }
5098 
5099 bool llvm::propagatesPoison(const Operator *I) {
5100   switch (I->getOpcode()) {
5101   case Instruction::Freeze:
5102   case Instruction::Select:
5103   case Instruction::PHI:
5104   case Instruction::Call:
5105   case Instruction::Invoke:
5106     return false;
5107   case Instruction::ICmp:
5108   case Instruction::FCmp:
5109   case Instruction::GetElementPtr:
5110     return true;
5111   default:
5112     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5113       return true;
5114 
5115     // Be conservative and return false.
5116     return false;
5117   }
5118 }
5119 
5120 void llvm::getGuaranteedWellDefinedOps(
5121     const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5122   switch (I->getOpcode()) {
5123     case Instruction::Store:
5124       Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5125       break;
5126 
5127     case Instruction::Load:
5128       Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5129       break;
5130 
5131     // Since dereferenceable attribute imply noundef, atomic operations
5132     // also implicitly have noundef pointers too
5133     case Instruction::AtomicCmpXchg:
5134       Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5135       break;
5136 
5137     case Instruction::AtomicRMW:
5138       Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5139       break;
5140 
5141     case Instruction::Call:
5142     case Instruction::Invoke: {
5143       const CallBase *CB = cast<CallBase>(I);
5144       if (CB->isIndirectCall())
5145         Operands.insert(CB->getCalledOperand());
5146       for (unsigned i = 0; i < CB->arg_size(); ++i) {
5147         if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5148             CB->paramHasAttr(i, Attribute::Dereferenceable))
5149           Operands.insert(CB->getArgOperand(i));
5150       }
5151       break;
5152     }
5153 
5154     default:
5155       break;
5156   }
5157 }
5158 
5159 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5160                                      SmallPtrSetImpl<const Value *> &Operands) {
5161   getGuaranteedWellDefinedOps(I, Operands);
5162   switch (I->getOpcode()) {
5163   // Divisors of these operations are allowed to be partially undef.
5164   case Instruction::UDiv:
5165   case Instruction::SDiv:
5166   case Instruction::URem:
5167   case Instruction::SRem:
5168     Operands.insert(I->getOperand(1));
5169     break;
5170 
5171   default:
5172     break;
5173   }
5174 }
5175 
5176 bool llvm::mustTriggerUB(const Instruction *I,
5177                          const SmallSet<const Value *, 16>& KnownPoison) {
5178   SmallPtrSet<const Value *, 4> NonPoisonOps;
5179   getGuaranteedNonPoisonOps(I, NonPoisonOps);
5180 
5181   for (const auto *V : NonPoisonOps)
5182     if (KnownPoison.count(V))
5183       return true;
5184 
5185   return false;
5186 }
5187 
5188 static bool programUndefinedIfUndefOrPoison(const Value *V,
5189                                             bool PoisonOnly) {
5190   // We currently only look for uses of values within the same basic
5191   // block, as that makes it easier to guarantee that the uses will be
5192   // executed given that Inst is executed.
5193   //
5194   // FIXME: Expand this to consider uses beyond the same basic block. To do
5195   // this, look out for the distinction between post-dominance and strong
5196   // post-dominance.
5197   const BasicBlock *BB = nullptr;
5198   BasicBlock::const_iterator Begin;
5199   if (const auto *Inst = dyn_cast<Instruction>(V)) {
5200     BB = Inst->getParent();
5201     Begin = Inst->getIterator();
5202     Begin++;
5203   } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5204     BB = &Arg->getParent()->getEntryBlock();
5205     Begin = BB->begin();
5206   } else {
5207     return false;
5208   }
5209 
5210   BasicBlock::const_iterator End = BB->end();
5211 
5212   if (!PoisonOnly) {
5213     // Since undef does not propagate eagerly, be conservative & just check
5214     // whether a value is directly passed to an instruction that must take
5215     // well-defined operands.
5216 
5217     for (auto &I : make_range(Begin, End)) {
5218       SmallPtrSet<const Value *, 4> WellDefinedOps;
5219       getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5220       for (auto *Op : WellDefinedOps) {
5221         if (Op == V)
5222           return true;
5223       }
5224       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5225         break;
5226     }
5227     return false;
5228   }
5229 
5230   // Set of instructions that we have proved will yield poison if Inst
5231   // does.
5232   SmallSet<const Value *, 16> YieldsPoison;
5233   SmallSet<const BasicBlock *, 4> Visited;
5234 
5235   YieldsPoison.insert(V);
5236   auto Propagate = [&](const User *User) {
5237     if (propagatesPoison(cast<Operator>(User)))
5238       YieldsPoison.insert(User);
5239   };
5240   for_each(V->users(), Propagate);
5241   Visited.insert(BB);
5242 
5243   unsigned Iter = 0;
5244   while (Iter++ < MaxAnalysisRecursionDepth) {
5245     for (auto &I : make_range(Begin, End)) {
5246       if (mustTriggerUB(&I, YieldsPoison))
5247         return true;
5248       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5249         return false;
5250 
5251       // Mark poison that propagates from I through uses of I.
5252       if (YieldsPoison.count(&I))
5253         for_each(I.users(), Propagate);
5254     }
5255 
5256     if (auto *NextBB = BB->getSingleSuccessor()) {
5257       if (Visited.insert(NextBB).second) {
5258         BB = NextBB;
5259         Begin = BB->getFirstNonPHI()->getIterator();
5260         End = BB->end();
5261         continue;
5262       }
5263     }
5264 
5265     break;
5266   }
5267   return false;
5268 }
5269 
5270 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5271   return ::programUndefinedIfUndefOrPoison(Inst, false);
5272 }
5273 
5274 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5275   return ::programUndefinedIfUndefOrPoison(Inst, true);
5276 }
5277 
5278 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5279   if (FMF.noNaNs())
5280     return true;
5281 
5282   if (auto *C = dyn_cast<ConstantFP>(V))
5283     return !C->isNaN();
5284 
5285   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5286     if (!C->getElementType()->isFloatingPointTy())
5287       return false;
5288     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5289       if (C->getElementAsAPFloat(I).isNaN())
5290         return false;
5291     }
5292     return true;
5293   }
5294 
5295   if (isa<ConstantAggregateZero>(V))
5296     return true;
5297 
5298   return false;
5299 }
5300 
5301 static bool isKnownNonZero(const Value *V) {
5302   if (auto *C = dyn_cast<ConstantFP>(V))
5303     return !C->isZero();
5304 
5305   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5306     if (!C->getElementType()->isFloatingPointTy())
5307       return false;
5308     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5309       if (C->getElementAsAPFloat(I).isZero())
5310         return false;
5311     }
5312     return true;
5313   }
5314 
5315   return false;
5316 }
5317 
5318 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5319 /// Given non-min/max outer cmp/select from the clamp pattern this
5320 /// function recognizes if it can be substitued by a "canonical" min/max
5321 /// pattern.
5322 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5323                                                Value *CmpLHS, Value *CmpRHS,
5324                                                Value *TrueVal, Value *FalseVal,
5325                                                Value *&LHS, Value *&RHS) {
5326   // Try to match
5327   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5328   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5329   // and return description of the outer Max/Min.
5330 
5331   // First, check if select has inverse order:
5332   if (CmpRHS == FalseVal) {
5333     std::swap(TrueVal, FalseVal);
5334     Pred = CmpInst::getInversePredicate(Pred);
5335   }
5336 
5337   // Assume success now. If there's no match, callers should not use these anyway.
5338   LHS = TrueVal;
5339   RHS = FalseVal;
5340 
5341   const APFloat *FC1;
5342   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5343     return {SPF_UNKNOWN, SPNB_NA, false};
5344 
5345   const APFloat *FC2;
5346   switch (Pred) {
5347   case CmpInst::FCMP_OLT:
5348   case CmpInst::FCMP_OLE:
5349   case CmpInst::FCMP_ULT:
5350   case CmpInst::FCMP_ULE:
5351     if (match(FalseVal,
5352               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5353                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5354         *FC1 < *FC2)
5355       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5356     break;
5357   case CmpInst::FCMP_OGT:
5358   case CmpInst::FCMP_OGE:
5359   case CmpInst::FCMP_UGT:
5360   case CmpInst::FCMP_UGE:
5361     if (match(FalseVal,
5362               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5363                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5364         *FC1 > *FC2)
5365       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5366     break;
5367   default:
5368     break;
5369   }
5370 
5371   return {SPF_UNKNOWN, SPNB_NA, false};
5372 }
5373 
5374 /// Recognize variations of:
5375 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5376 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5377                                       Value *CmpLHS, Value *CmpRHS,
5378                                       Value *TrueVal, Value *FalseVal) {
5379   // Swap the select operands and predicate to match the patterns below.
5380   if (CmpRHS != TrueVal) {
5381     Pred = ICmpInst::getSwappedPredicate(Pred);
5382     std::swap(TrueVal, FalseVal);
5383   }
5384   const APInt *C1;
5385   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5386     const APInt *C2;
5387     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5388     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5389         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5390       return {SPF_SMAX, SPNB_NA, false};
5391 
5392     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5393     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5394         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5395       return {SPF_SMIN, SPNB_NA, false};
5396 
5397     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5398     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5399         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5400       return {SPF_UMAX, SPNB_NA, false};
5401 
5402     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5403     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5404         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5405       return {SPF_UMIN, SPNB_NA, false};
5406   }
5407   return {SPF_UNKNOWN, SPNB_NA, false};
5408 }
5409 
5410 /// Recognize variations of:
5411 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5412 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5413                                                Value *CmpLHS, Value *CmpRHS,
5414                                                Value *TVal, Value *FVal,
5415                                                unsigned Depth) {
5416   // TODO: Allow FP min/max with nnan/nsz.
5417   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5418 
5419   Value *A = nullptr, *B = nullptr;
5420   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5421   if (!SelectPatternResult::isMinOrMax(L.Flavor))
5422     return {SPF_UNKNOWN, SPNB_NA, false};
5423 
5424   Value *C = nullptr, *D = nullptr;
5425   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5426   if (L.Flavor != R.Flavor)
5427     return {SPF_UNKNOWN, SPNB_NA, false};
5428 
5429   // We have something like: x Pred y ? min(a, b) : min(c, d).
5430   // Try to match the compare to the min/max operations of the select operands.
5431   // First, make sure we have the right compare predicate.
5432   switch (L.Flavor) {
5433   case SPF_SMIN:
5434     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5435       Pred = ICmpInst::getSwappedPredicate(Pred);
5436       std::swap(CmpLHS, CmpRHS);
5437     }
5438     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5439       break;
5440     return {SPF_UNKNOWN, SPNB_NA, false};
5441   case SPF_SMAX:
5442     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5443       Pred = ICmpInst::getSwappedPredicate(Pred);
5444       std::swap(CmpLHS, CmpRHS);
5445     }
5446     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5447       break;
5448     return {SPF_UNKNOWN, SPNB_NA, false};
5449   case SPF_UMIN:
5450     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5451       Pred = ICmpInst::getSwappedPredicate(Pred);
5452       std::swap(CmpLHS, CmpRHS);
5453     }
5454     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5455       break;
5456     return {SPF_UNKNOWN, SPNB_NA, false};
5457   case SPF_UMAX:
5458     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5459       Pred = ICmpInst::getSwappedPredicate(Pred);
5460       std::swap(CmpLHS, CmpRHS);
5461     }
5462     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5463       break;
5464     return {SPF_UNKNOWN, SPNB_NA, false};
5465   default:
5466     return {SPF_UNKNOWN, SPNB_NA, false};
5467   }
5468 
5469   // If there is a common operand in the already matched min/max and the other
5470   // min/max operands match the compare operands (either directly or inverted),
5471   // then this is min/max of the same flavor.
5472 
5473   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5474   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5475   if (D == B) {
5476     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5477                                          match(A, m_Not(m_Specific(CmpRHS)))))
5478       return {L.Flavor, SPNB_NA, false};
5479   }
5480   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5481   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5482   if (C == B) {
5483     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5484                                          match(A, m_Not(m_Specific(CmpRHS)))))
5485       return {L.Flavor, SPNB_NA, false};
5486   }
5487   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5488   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5489   if (D == A) {
5490     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5491                                          match(B, m_Not(m_Specific(CmpRHS)))))
5492       return {L.Flavor, SPNB_NA, false};
5493   }
5494   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5495   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5496   if (C == A) {
5497     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5498                                          match(B, m_Not(m_Specific(CmpRHS)))))
5499       return {L.Flavor, SPNB_NA, false};
5500   }
5501 
5502   return {SPF_UNKNOWN, SPNB_NA, false};
5503 }
5504 
5505 /// If the input value is the result of a 'not' op, constant integer, or vector
5506 /// splat of a constant integer, return the bitwise-not source value.
5507 /// TODO: This could be extended to handle non-splat vector integer constants.
5508 static Value *getNotValue(Value *V) {
5509   Value *NotV;
5510   if (match(V, m_Not(m_Value(NotV))))
5511     return NotV;
5512 
5513   const APInt *C;
5514   if (match(V, m_APInt(C)))
5515     return ConstantInt::get(V->getType(), ~(*C));
5516 
5517   return nullptr;
5518 }
5519 
5520 /// Match non-obvious integer minimum and maximum sequences.
5521 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5522                                        Value *CmpLHS, Value *CmpRHS,
5523                                        Value *TrueVal, Value *FalseVal,
5524                                        Value *&LHS, Value *&RHS,
5525                                        unsigned Depth) {
5526   // Assume success. If there's no match, callers should not use these anyway.
5527   LHS = TrueVal;
5528   RHS = FalseVal;
5529 
5530   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5531   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5532     return SPR;
5533 
5534   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5535   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5536     return SPR;
5537 
5538   // Look through 'not' ops to find disguised min/max.
5539   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5540   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5541   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5542     switch (Pred) {
5543     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5544     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5545     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5546     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5547     default: break;
5548     }
5549   }
5550 
5551   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5552   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5553   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5554     switch (Pred) {
5555     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5556     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5557     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5558     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5559     default: break;
5560     }
5561   }
5562 
5563   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5564     return {SPF_UNKNOWN, SPNB_NA, false};
5565 
5566   // Z = X -nsw Y
5567   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5568   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5569   if (match(TrueVal, m_Zero()) &&
5570       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5571     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5572 
5573   // Z = X -nsw Y
5574   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5575   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5576   if (match(FalseVal, m_Zero()) &&
5577       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5578     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5579 
5580   const APInt *C1;
5581   if (!match(CmpRHS, m_APInt(C1)))
5582     return {SPF_UNKNOWN, SPNB_NA, false};
5583 
5584   // An unsigned min/max can be written with a signed compare.
5585   const APInt *C2;
5586   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5587       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5588     // Is the sign bit set?
5589     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5590     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5591     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5592         C2->isMaxSignedValue())
5593       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5594 
5595     // Is the sign bit clear?
5596     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5597     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5598     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5599         C2->isMinSignedValue())
5600       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5601   }
5602 
5603   return {SPF_UNKNOWN, SPNB_NA, false};
5604 }
5605 
5606 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5607   assert(X && Y && "Invalid operand");
5608 
5609   // X = sub (0, Y) || X = sub nsw (0, Y)
5610   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5611       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5612     return true;
5613 
5614   // Y = sub (0, X) || Y = sub nsw (0, X)
5615   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5616       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5617     return true;
5618 
5619   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5620   Value *A, *B;
5621   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5622                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5623          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5624                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5625 }
5626 
5627 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5628                                               FastMathFlags FMF,
5629                                               Value *CmpLHS, Value *CmpRHS,
5630                                               Value *TrueVal, Value *FalseVal,
5631                                               Value *&LHS, Value *&RHS,
5632                                               unsigned Depth) {
5633   if (CmpInst::isFPPredicate(Pred)) {
5634     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5635     // 0.0 operand, set the compare's 0.0 operands to that same value for the
5636     // purpose of identifying min/max. Disregard vector constants with undefined
5637     // elements because those can not be back-propagated for analysis.
5638     Value *OutputZeroVal = nullptr;
5639     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5640         !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
5641       OutputZeroVal = TrueVal;
5642     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5643              !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
5644       OutputZeroVal = FalseVal;
5645 
5646     if (OutputZeroVal) {
5647       if (match(CmpLHS, m_AnyZeroFP()))
5648         CmpLHS = OutputZeroVal;
5649       if (match(CmpRHS, m_AnyZeroFP()))
5650         CmpRHS = OutputZeroVal;
5651     }
5652   }
5653 
5654   LHS = CmpLHS;
5655   RHS = CmpRHS;
5656 
5657   // Signed zero may return inconsistent results between implementations.
5658   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5659   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5660   // Therefore, we behave conservatively and only proceed if at least one of the
5661   // operands is known to not be zero or if we don't care about signed zero.
5662   switch (Pred) {
5663   default: break;
5664   // FIXME: Include OGT/OLT/UGT/ULT.
5665   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5666   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5667     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5668         !isKnownNonZero(CmpRHS))
5669       return {SPF_UNKNOWN, SPNB_NA, false};
5670   }
5671 
5672   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5673   bool Ordered = false;
5674 
5675   // When given one NaN and one non-NaN input:
5676   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5677   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5678   //     ordered comparison fails), which could be NaN or non-NaN.
5679   // so here we discover exactly what NaN behavior is required/accepted.
5680   if (CmpInst::isFPPredicate(Pred)) {
5681     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5682     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5683 
5684     if (LHSSafe && RHSSafe) {
5685       // Both operands are known non-NaN.
5686       NaNBehavior = SPNB_RETURNS_ANY;
5687     } else if (CmpInst::isOrdered(Pred)) {
5688       // An ordered comparison will return false when given a NaN, so it
5689       // returns the RHS.
5690       Ordered = true;
5691       if (LHSSafe)
5692         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5693         NaNBehavior = SPNB_RETURNS_NAN;
5694       else if (RHSSafe)
5695         NaNBehavior = SPNB_RETURNS_OTHER;
5696       else
5697         // Completely unsafe.
5698         return {SPF_UNKNOWN, SPNB_NA, false};
5699     } else {
5700       Ordered = false;
5701       // An unordered comparison will return true when given a NaN, so it
5702       // returns the LHS.
5703       if (LHSSafe)
5704         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5705         NaNBehavior = SPNB_RETURNS_OTHER;
5706       else if (RHSSafe)
5707         NaNBehavior = SPNB_RETURNS_NAN;
5708       else
5709         // Completely unsafe.
5710         return {SPF_UNKNOWN, SPNB_NA, false};
5711     }
5712   }
5713 
5714   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5715     std::swap(CmpLHS, CmpRHS);
5716     Pred = CmpInst::getSwappedPredicate(Pred);
5717     if (NaNBehavior == SPNB_RETURNS_NAN)
5718       NaNBehavior = SPNB_RETURNS_OTHER;
5719     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5720       NaNBehavior = SPNB_RETURNS_NAN;
5721     Ordered = !Ordered;
5722   }
5723 
5724   // ([if]cmp X, Y) ? X : Y
5725   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5726     switch (Pred) {
5727     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5728     case ICmpInst::ICMP_UGT:
5729     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5730     case ICmpInst::ICMP_SGT:
5731     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5732     case ICmpInst::ICMP_ULT:
5733     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5734     case ICmpInst::ICMP_SLT:
5735     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5736     case FCmpInst::FCMP_UGT:
5737     case FCmpInst::FCMP_UGE:
5738     case FCmpInst::FCMP_OGT:
5739     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5740     case FCmpInst::FCMP_ULT:
5741     case FCmpInst::FCMP_ULE:
5742     case FCmpInst::FCMP_OLT:
5743     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5744     }
5745   }
5746 
5747   if (isKnownNegation(TrueVal, FalseVal)) {
5748     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5749     // match against either LHS or sext(LHS).
5750     auto MaybeSExtCmpLHS =
5751         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5752     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5753     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5754     if (match(TrueVal, MaybeSExtCmpLHS)) {
5755       // Set the return values. If the compare uses the negated value (-X >s 0),
5756       // swap the return values because the negated value is always 'RHS'.
5757       LHS = TrueVal;
5758       RHS = FalseVal;
5759       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5760         std::swap(LHS, RHS);
5761 
5762       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5763       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5764       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5765         return {SPF_ABS, SPNB_NA, false};
5766 
5767       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5768       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5769         return {SPF_ABS, SPNB_NA, false};
5770 
5771       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5772       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5773       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5774         return {SPF_NABS, SPNB_NA, false};
5775     }
5776     else if (match(FalseVal, MaybeSExtCmpLHS)) {
5777       // Set the return values. If the compare uses the negated value (-X >s 0),
5778       // swap the return values because the negated value is always 'RHS'.
5779       LHS = FalseVal;
5780       RHS = TrueVal;
5781       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5782         std::swap(LHS, RHS);
5783 
5784       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5785       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5786       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5787         return {SPF_NABS, SPNB_NA, false};
5788 
5789       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5790       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5791       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5792         return {SPF_ABS, SPNB_NA, false};
5793     }
5794   }
5795 
5796   if (CmpInst::isIntPredicate(Pred))
5797     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5798 
5799   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5800   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5801   // semantics than minNum. Be conservative in such case.
5802   if (NaNBehavior != SPNB_RETURNS_ANY ||
5803       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5804        !isKnownNonZero(CmpRHS)))
5805     return {SPF_UNKNOWN, SPNB_NA, false};
5806 
5807   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5808 }
5809 
5810 /// Helps to match a select pattern in case of a type mismatch.
5811 ///
5812 /// The function processes the case when type of true and false values of a
5813 /// select instruction differs from type of the cmp instruction operands because
5814 /// of a cast instruction. The function checks if it is legal to move the cast
5815 /// operation after "select". If yes, it returns the new second value of
5816 /// "select" (with the assumption that cast is moved):
5817 /// 1. As operand of cast instruction when both values of "select" are same cast
5818 /// instructions.
5819 /// 2. As restored constant (by applying reverse cast operation) when the first
5820 /// value of the "select" is a cast operation and the second value is a
5821 /// constant.
5822 /// NOTE: We return only the new second value because the first value could be
5823 /// accessed as operand of cast instruction.
5824 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5825                               Instruction::CastOps *CastOp) {
5826   auto *Cast1 = dyn_cast<CastInst>(V1);
5827   if (!Cast1)
5828     return nullptr;
5829 
5830   *CastOp = Cast1->getOpcode();
5831   Type *SrcTy = Cast1->getSrcTy();
5832   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5833     // If V1 and V2 are both the same cast from the same type, look through V1.
5834     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5835       return Cast2->getOperand(0);
5836     return nullptr;
5837   }
5838 
5839   auto *C = dyn_cast<Constant>(V2);
5840   if (!C)
5841     return nullptr;
5842 
5843   Constant *CastedTo = nullptr;
5844   switch (*CastOp) {
5845   case Instruction::ZExt:
5846     if (CmpI->isUnsigned())
5847       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5848     break;
5849   case Instruction::SExt:
5850     if (CmpI->isSigned())
5851       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5852     break;
5853   case Instruction::Trunc:
5854     Constant *CmpConst;
5855     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5856         CmpConst->getType() == SrcTy) {
5857       // Here we have the following case:
5858       //
5859       //   %cond = cmp iN %x, CmpConst
5860       //   %tr = trunc iN %x to iK
5861       //   %narrowsel = select i1 %cond, iK %t, iK C
5862       //
5863       // We can always move trunc after select operation:
5864       //
5865       //   %cond = cmp iN %x, CmpConst
5866       //   %widesel = select i1 %cond, iN %x, iN CmpConst
5867       //   %tr = trunc iN %widesel to iK
5868       //
5869       // Note that C could be extended in any way because we don't care about
5870       // upper bits after truncation. It can't be abs pattern, because it would
5871       // look like:
5872       //
5873       //   select i1 %cond, x, -x.
5874       //
5875       // So only min/max pattern could be matched. Such match requires widened C
5876       // == CmpConst. That is why set widened C = CmpConst, condition trunc
5877       // CmpConst == C is checked below.
5878       CastedTo = CmpConst;
5879     } else {
5880       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5881     }
5882     break;
5883   case Instruction::FPTrunc:
5884     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5885     break;
5886   case Instruction::FPExt:
5887     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5888     break;
5889   case Instruction::FPToUI:
5890     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5891     break;
5892   case Instruction::FPToSI:
5893     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5894     break;
5895   case Instruction::UIToFP:
5896     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5897     break;
5898   case Instruction::SIToFP:
5899     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5900     break;
5901   default:
5902     break;
5903   }
5904 
5905   if (!CastedTo)
5906     return nullptr;
5907 
5908   // Make sure the cast doesn't lose any information.
5909   Constant *CastedBack =
5910       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5911   if (CastedBack != C)
5912     return nullptr;
5913 
5914   return CastedTo;
5915 }
5916 
5917 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5918                                              Instruction::CastOps *CastOp,
5919                                              unsigned Depth) {
5920   if (Depth >= MaxAnalysisRecursionDepth)
5921     return {SPF_UNKNOWN, SPNB_NA, false};
5922 
5923   SelectInst *SI = dyn_cast<SelectInst>(V);
5924   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5925 
5926   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5927   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5928 
5929   Value *TrueVal = SI->getTrueValue();
5930   Value *FalseVal = SI->getFalseValue();
5931 
5932   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5933                                             CastOp, Depth);
5934 }
5935 
5936 SelectPatternResult llvm::matchDecomposedSelectPattern(
5937     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5938     Instruction::CastOps *CastOp, unsigned Depth) {
5939   CmpInst::Predicate Pred = CmpI->getPredicate();
5940   Value *CmpLHS = CmpI->getOperand(0);
5941   Value *CmpRHS = CmpI->getOperand(1);
5942   FastMathFlags FMF;
5943   if (isa<FPMathOperator>(CmpI))
5944     FMF = CmpI->getFastMathFlags();
5945 
5946   // Bail out early.
5947   if (CmpI->isEquality())
5948     return {SPF_UNKNOWN, SPNB_NA, false};
5949 
5950   // Deal with type mismatches.
5951   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5952     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5953       // If this is a potential fmin/fmax with a cast to integer, then ignore
5954       // -0.0 because there is no corresponding integer value.
5955       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5956         FMF.setNoSignedZeros();
5957       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5958                                   cast<CastInst>(TrueVal)->getOperand(0), C,
5959                                   LHS, RHS, Depth);
5960     }
5961     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5962       // If this is a potential fmin/fmax with a cast to integer, then ignore
5963       // -0.0 because there is no corresponding integer value.
5964       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5965         FMF.setNoSignedZeros();
5966       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5967                                   C, cast<CastInst>(FalseVal)->getOperand(0),
5968                                   LHS, RHS, Depth);
5969     }
5970   }
5971   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5972                               LHS, RHS, Depth);
5973 }
5974 
5975 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5976   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5977   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5978   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5979   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5980   if (SPF == SPF_FMINNUM)
5981     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5982   if (SPF == SPF_FMAXNUM)
5983     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5984   llvm_unreachable("unhandled!");
5985 }
5986 
5987 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5988   if (SPF == SPF_SMIN) return SPF_SMAX;
5989   if (SPF == SPF_UMIN) return SPF_UMAX;
5990   if (SPF == SPF_SMAX) return SPF_SMIN;
5991   if (SPF == SPF_UMAX) return SPF_UMIN;
5992   llvm_unreachable("unhandled!");
5993 }
5994 
5995 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
5996   switch (MinMaxID) {
5997   case Intrinsic::smax: return Intrinsic::smin;
5998   case Intrinsic::smin: return Intrinsic::smax;
5999   case Intrinsic::umax: return Intrinsic::umin;
6000   case Intrinsic::umin: return Intrinsic::umax;
6001   default: llvm_unreachable("Unexpected intrinsic");
6002   }
6003 }
6004 
6005 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6006   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6007 }
6008 
6009 std::pair<Intrinsic::ID, bool>
6010 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6011   // Check if VL contains select instructions that can be folded into a min/max
6012   // vector intrinsic and return the intrinsic if it is possible.
6013   // TODO: Support floating point min/max.
6014   bool AllCmpSingleUse = true;
6015   SelectPatternResult SelectPattern;
6016   SelectPattern.Flavor = SPF_UNKNOWN;
6017   if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6018         Value *LHS, *RHS;
6019         auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6020         if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6021             CurrentPattern.Flavor == SPF_FMINNUM ||
6022             CurrentPattern.Flavor == SPF_FMAXNUM ||
6023             !I->getType()->isIntOrIntVectorTy())
6024           return false;
6025         if (SelectPattern.Flavor != SPF_UNKNOWN &&
6026             SelectPattern.Flavor != CurrentPattern.Flavor)
6027           return false;
6028         SelectPattern = CurrentPattern;
6029         AllCmpSingleUse &=
6030             match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6031         return true;
6032       })) {
6033     switch (SelectPattern.Flavor) {
6034     case SPF_SMIN:
6035       return {Intrinsic::smin, AllCmpSingleUse};
6036     case SPF_UMIN:
6037       return {Intrinsic::umin, AllCmpSingleUse};
6038     case SPF_SMAX:
6039       return {Intrinsic::smax, AllCmpSingleUse};
6040     case SPF_UMAX:
6041       return {Intrinsic::umax, AllCmpSingleUse};
6042     default:
6043       llvm_unreachable("unexpected select pattern flavor");
6044     }
6045   }
6046   return {Intrinsic::not_intrinsic, false};
6047 }
6048 
6049 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6050                                  Value *&Start, Value *&Step) {
6051   // Handle the case of a simple two-predecessor recurrence PHI.
6052   // There's a lot more that could theoretically be done here, but
6053   // this is sufficient to catch some interesting cases.
6054   if (P->getNumIncomingValues() != 2)
6055     return false;
6056 
6057   for (unsigned i = 0; i != 2; ++i) {
6058     Value *L = P->getIncomingValue(i);
6059     Value *R = P->getIncomingValue(!i);
6060     Operator *LU = dyn_cast<Operator>(L);
6061     if (!LU)
6062       continue;
6063     unsigned Opcode = LU->getOpcode();
6064 
6065     switch (Opcode) {
6066     default:
6067       continue;
6068     // TODO: Expand list -- xor, div, gep, uaddo, etc..
6069     case Instruction::LShr:
6070     case Instruction::AShr:
6071     case Instruction::Shl:
6072     case Instruction::Add:
6073     case Instruction::Sub:
6074     case Instruction::And:
6075     case Instruction::Or:
6076     case Instruction::Mul: {
6077       Value *LL = LU->getOperand(0);
6078       Value *LR = LU->getOperand(1);
6079       // Find a recurrence.
6080       if (LL == P)
6081         L = LR;
6082       else if (LR == P)
6083         L = LL;
6084       else
6085         continue; // Check for recurrence with L and R flipped.
6086 
6087       break; // Match!
6088     }
6089     };
6090 
6091     // We have matched a recurrence of the form:
6092     //   %iv = [R, %entry], [%iv.next, %backedge]
6093     //   %iv.next = binop %iv, L
6094     // OR
6095     //   %iv = [R, %entry], [%iv.next, %backedge]
6096     //   %iv.next = binop L, %iv
6097     BO = cast<BinaryOperator>(LU);
6098     Start = R;
6099     Step = L;
6100     return true;
6101   }
6102   return false;
6103 }
6104 
6105 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6106                                  Value *&Start, Value *&Step) {
6107   BinaryOperator *BO = nullptr;
6108   P = dyn_cast<PHINode>(I->getOperand(0));
6109   if (!P)
6110     P = dyn_cast<PHINode>(I->getOperand(1));
6111   return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6112 }
6113 
6114 /// Return true if "icmp Pred LHS RHS" is always true.
6115 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6116                             const Value *RHS, const DataLayout &DL,
6117                             unsigned Depth) {
6118   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6119   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6120     return true;
6121 
6122   switch (Pred) {
6123   default:
6124     return false;
6125 
6126   case CmpInst::ICMP_SLE: {
6127     const APInt *C;
6128 
6129     // LHS s<= LHS +_{nsw} C   if C >= 0
6130     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6131       return !C->isNegative();
6132     return false;
6133   }
6134 
6135   case CmpInst::ICMP_ULE: {
6136     const APInt *C;
6137 
6138     // LHS u<= LHS +_{nuw} C   for any C
6139     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6140       return true;
6141 
6142     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6143     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6144                                        const Value *&X,
6145                                        const APInt *&CA, const APInt *&CB) {
6146       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6147           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6148         return true;
6149 
6150       // If X & C == 0 then (X | C) == X +_{nuw} C
6151       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6152           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6153         KnownBits Known(CA->getBitWidth());
6154         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6155                          /*CxtI*/ nullptr, /*DT*/ nullptr);
6156         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6157           return true;
6158       }
6159 
6160       return false;
6161     };
6162 
6163     const Value *X;
6164     const APInt *CLHS, *CRHS;
6165     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6166       return CLHS->ule(*CRHS);
6167 
6168     return false;
6169   }
6170   }
6171 }
6172 
6173 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6174 /// ALHS ARHS" is true.  Otherwise, return None.
6175 static Optional<bool>
6176 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6177                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
6178                       const DataLayout &DL, unsigned Depth) {
6179   switch (Pred) {
6180   default:
6181     return None;
6182 
6183   case CmpInst::ICMP_SLT:
6184   case CmpInst::ICMP_SLE:
6185     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6186         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6187       return true;
6188     return None;
6189 
6190   case CmpInst::ICMP_ULT:
6191   case CmpInst::ICMP_ULE:
6192     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6193         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6194       return true;
6195     return None;
6196   }
6197 }
6198 
6199 /// Return true if the operands of the two compares match.  IsSwappedOps is true
6200 /// when the operands match, but are swapped.
6201 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6202                           const Value *BLHS, const Value *BRHS,
6203                           bool &IsSwappedOps) {
6204 
6205   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6206   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6207   return IsMatchingOps || IsSwappedOps;
6208 }
6209 
6210 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6211 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6212 /// Otherwise, return None if we can't infer anything.
6213 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6214                                                     CmpInst::Predicate BPred,
6215                                                     bool AreSwappedOps) {
6216   // Canonicalize the predicate as if the operands were not commuted.
6217   if (AreSwappedOps)
6218     BPred = ICmpInst::getSwappedPredicate(BPred);
6219 
6220   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6221     return true;
6222   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6223     return false;
6224 
6225   return None;
6226 }
6227 
6228 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6229 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6230 /// Otherwise, return None if we can't infer anything.
6231 static Optional<bool>
6232 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6233                                  const ConstantInt *C1,
6234                                  CmpInst::Predicate BPred,
6235                                  const ConstantInt *C2) {
6236   ConstantRange DomCR =
6237       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6238   ConstantRange CR =
6239       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6240   ConstantRange Intersection = DomCR.intersectWith(CR);
6241   ConstantRange Difference = DomCR.difference(CR);
6242   if (Intersection.isEmptySet())
6243     return false;
6244   if (Difference.isEmptySet())
6245     return true;
6246   return None;
6247 }
6248 
6249 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6250 /// false.  Otherwise, return None if we can't infer anything.
6251 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6252                                          CmpInst::Predicate BPred,
6253                                          const Value *BLHS, const Value *BRHS,
6254                                          const DataLayout &DL, bool LHSIsTrue,
6255                                          unsigned Depth) {
6256   Value *ALHS = LHS->getOperand(0);
6257   Value *ARHS = LHS->getOperand(1);
6258 
6259   // The rest of the logic assumes the LHS condition is true.  If that's not the
6260   // case, invert the predicate to make it so.
6261   CmpInst::Predicate APred =
6262       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6263 
6264   // Can we infer anything when the two compares have matching operands?
6265   bool AreSwappedOps;
6266   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6267     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6268             APred, BPred, AreSwappedOps))
6269       return Implication;
6270     // No amount of additional analysis will infer the second condition, so
6271     // early exit.
6272     return None;
6273   }
6274 
6275   // Can we infer anything when the LHS operands match and the RHS operands are
6276   // constants (not necessarily matching)?
6277   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6278     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6279             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6280       return Implication;
6281     // No amount of additional analysis will infer the second condition, so
6282     // early exit.
6283     return None;
6284   }
6285 
6286   if (APred == BPred)
6287     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6288   return None;
6289 }
6290 
6291 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6292 /// false.  Otherwise, return None if we can't infer anything.  We expect the
6293 /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6294 static Optional<bool>
6295 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6296                    const Value *RHSOp0, const Value *RHSOp1,
6297                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6298   // The LHS must be an 'or', 'and', or a 'select' instruction.
6299   assert((LHS->getOpcode() == Instruction::And ||
6300           LHS->getOpcode() == Instruction::Or ||
6301           LHS->getOpcode() == Instruction::Select) &&
6302          "Expected LHS to be 'and', 'or', or 'select'.");
6303 
6304   assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6305 
6306   // If the result of an 'or' is false, then we know both legs of the 'or' are
6307   // false.  Similarly, if the result of an 'and' is true, then we know both
6308   // legs of the 'and' are true.
6309   const Value *ALHS, *ARHS;
6310   if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6311       (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6312     // FIXME: Make this non-recursion.
6313     if (Optional<bool> Implication = isImpliedCondition(
6314             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6315       return Implication;
6316     if (Optional<bool> Implication = isImpliedCondition(
6317             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6318       return Implication;
6319     return None;
6320   }
6321   return None;
6322 }
6323 
6324 Optional<bool>
6325 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6326                          const Value *RHSOp0, const Value *RHSOp1,
6327                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6328   // Bail out when we hit the limit.
6329   if (Depth == MaxAnalysisRecursionDepth)
6330     return None;
6331 
6332   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6333   // example.
6334   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6335     return None;
6336 
6337   Type *OpTy = LHS->getType();
6338   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6339 
6340   // FIXME: Extending the code below to handle vectors.
6341   if (OpTy->isVectorTy())
6342     return None;
6343 
6344   assert(OpTy->isIntegerTy(1) && "implied by above");
6345 
6346   // Both LHS and RHS are icmps.
6347   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6348   if (LHSCmp)
6349     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6350                               Depth);
6351 
6352   /// The LHS should be an 'or', 'and', or a 'select' instruction.  We expect
6353   /// the RHS to be an icmp.
6354   /// FIXME: Add support for and/or/select on the RHS.
6355   if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6356     if ((LHSI->getOpcode() == Instruction::And ||
6357          LHSI->getOpcode() == Instruction::Or ||
6358          LHSI->getOpcode() == Instruction::Select))
6359       return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6360                                 Depth);
6361   }
6362   return None;
6363 }
6364 
6365 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6366                                         const DataLayout &DL, bool LHSIsTrue,
6367                                         unsigned Depth) {
6368   // LHS ==> RHS by definition
6369   if (LHS == RHS)
6370     return LHSIsTrue;
6371 
6372   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6373   if (RHSCmp)
6374     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6375                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6376                               LHSIsTrue, Depth);
6377   return None;
6378 }
6379 
6380 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6381 // condition dominating ContextI or nullptr, if no condition is found.
6382 static std::pair<Value *, bool>
6383 getDomPredecessorCondition(const Instruction *ContextI) {
6384   if (!ContextI || !ContextI->getParent())
6385     return {nullptr, false};
6386 
6387   // TODO: This is a poor/cheap way to determine dominance. Should we use a
6388   // dominator tree (eg, from a SimplifyQuery) instead?
6389   const BasicBlock *ContextBB = ContextI->getParent();
6390   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6391   if (!PredBB)
6392     return {nullptr, false};
6393 
6394   // We need a conditional branch in the predecessor.
6395   Value *PredCond;
6396   BasicBlock *TrueBB, *FalseBB;
6397   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6398     return {nullptr, false};
6399 
6400   // The branch should get simplified. Don't bother simplifying this condition.
6401   if (TrueBB == FalseBB)
6402     return {nullptr, false};
6403 
6404   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6405          "Predecessor block does not point to successor?");
6406 
6407   // Is this condition implied by the predecessor condition?
6408   return {PredCond, TrueBB == ContextBB};
6409 }
6410 
6411 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6412                                              const Instruction *ContextI,
6413                                              const DataLayout &DL) {
6414   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6415   auto PredCond = getDomPredecessorCondition(ContextI);
6416   if (PredCond.first)
6417     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6418   return None;
6419 }
6420 
6421 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6422                                              const Value *LHS, const Value *RHS,
6423                                              const Instruction *ContextI,
6424                                              const DataLayout &DL) {
6425   auto PredCond = getDomPredecessorCondition(ContextI);
6426   if (PredCond.first)
6427     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6428                               PredCond.second);
6429   return None;
6430 }
6431 
6432 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6433                               APInt &Upper, const InstrInfoQuery &IIQ) {
6434   unsigned Width = Lower.getBitWidth();
6435   const APInt *C;
6436   switch (BO.getOpcode()) {
6437   case Instruction::Add:
6438     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6439       // FIXME: If we have both nuw and nsw, we should reduce the range further.
6440       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6441         // 'add nuw x, C' produces [C, UINT_MAX].
6442         Lower = *C;
6443       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6444         if (C->isNegative()) {
6445           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6446           Lower = APInt::getSignedMinValue(Width);
6447           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6448         } else {
6449           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6450           Lower = APInt::getSignedMinValue(Width) + *C;
6451           Upper = APInt::getSignedMaxValue(Width) + 1;
6452         }
6453       }
6454     }
6455     break;
6456 
6457   case Instruction::And:
6458     if (match(BO.getOperand(1), m_APInt(C)))
6459       // 'and x, C' produces [0, C].
6460       Upper = *C + 1;
6461     break;
6462 
6463   case Instruction::Or:
6464     if (match(BO.getOperand(1), m_APInt(C)))
6465       // 'or x, C' produces [C, UINT_MAX].
6466       Lower = *C;
6467     break;
6468 
6469   case Instruction::AShr:
6470     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6471       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6472       Lower = APInt::getSignedMinValue(Width).ashr(*C);
6473       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6474     } else if (match(BO.getOperand(0), m_APInt(C))) {
6475       unsigned ShiftAmount = Width - 1;
6476       if (!C->isNullValue() && IIQ.isExact(&BO))
6477         ShiftAmount = C->countTrailingZeros();
6478       if (C->isNegative()) {
6479         // 'ashr C, x' produces [C, C >> (Width-1)]
6480         Lower = *C;
6481         Upper = C->ashr(ShiftAmount) + 1;
6482       } else {
6483         // 'ashr C, x' produces [C >> (Width-1), C]
6484         Lower = C->ashr(ShiftAmount);
6485         Upper = *C + 1;
6486       }
6487     }
6488     break;
6489 
6490   case Instruction::LShr:
6491     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6492       // 'lshr x, C' produces [0, UINT_MAX >> C].
6493       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6494     } else if (match(BO.getOperand(0), m_APInt(C))) {
6495       // 'lshr C, x' produces [C >> (Width-1), C].
6496       unsigned ShiftAmount = Width - 1;
6497       if (!C->isNullValue() && IIQ.isExact(&BO))
6498         ShiftAmount = C->countTrailingZeros();
6499       Lower = C->lshr(ShiftAmount);
6500       Upper = *C + 1;
6501     }
6502     break;
6503 
6504   case Instruction::Shl:
6505     if (match(BO.getOperand(0), m_APInt(C))) {
6506       if (IIQ.hasNoUnsignedWrap(&BO)) {
6507         // 'shl nuw C, x' produces [C, C << CLZ(C)]
6508         Lower = *C;
6509         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6510       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6511         if (C->isNegative()) {
6512           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6513           unsigned ShiftAmount = C->countLeadingOnes() - 1;
6514           Lower = C->shl(ShiftAmount);
6515           Upper = *C + 1;
6516         } else {
6517           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6518           unsigned ShiftAmount = C->countLeadingZeros() - 1;
6519           Lower = *C;
6520           Upper = C->shl(ShiftAmount) + 1;
6521         }
6522       }
6523     }
6524     break;
6525 
6526   case Instruction::SDiv:
6527     if (match(BO.getOperand(1), m_APInt(C))) {
6528       APInt IntMin = APInt::getSignedMinValue(Width);
6529       APInt IntMax = APInt::getSignedMaxValue(Width);
6530       if (C->isAllOnesValue()) {
6531         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6532         //    where C != -1 and C != 0 and C != 1
6533         Lower = IntMin + 1;
6534         Upper = IntMax + 1;
6535       } else if (C->countLeadingZeros() < Width - 1) {
6536         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6537         //    where C != -1 and C != 0 and C != 1
6538         Lower = IntMin.sdiv(*C);
6539         Upper = IntMax.sdiv(*C);
6540         if (Lower.sgt(Upper))
6541           std::swap(Lower, Upper);
6542         Upper = Upper + 1;
6543         assert(Upper != Lower && "Upper part of range has wrapped!");
6544       }
6545     } else if (match(BO.getOperand(0), m_APInt(C))) {
6546       if (C->isMinSignedValue()) {
6547         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6548         Lower = *C;
6549         Upper = Lower.lshr(1) + 1;
6550       } else {
6551         // 'sdiv C, x' produces [-|C|, |C|].
6552         Upper = C->abs() + 1;
6553         Lower = (-Upper) + 1;
6554       }
6555     }
6556     break;
6557 
6558   case Instruction::UDiv:
6559     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6560       // 'udiv x, C' produces [0, UINT_MAX / C].
6561       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6562     } else if (match(BO.getOperand(0), m_APInt(C))) {
6563       // 'udiv C, x' produces [0, C].
6564       Upper = *C + 1;
6565     }
6566     break;
6567 
6568   case Instruction::SRem:
6569     if (match(BO.getOperand(1), m_APInt(C))) {
6570       // 'srem x, C' produces (-|C|, |C|).
6571       Upper = C->abs();
6572       Lower = (-Upper) + 1;
6573     }
6574     break;
6575 
6576   case Instruction::URem:
6577     if (match(BO.getOperand(1), m_APInt(C)))
6578       // 'urem x, C' produces [0, C).
6579       Upper = *C;
6580     break;
6581 
6582   default:
6583     break;
6584   }
6585 }
6586 
6587 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6588                                   APInt &Upper) {
6589   unsigned Width = Lower.getBitWidth();
6590   const APInt *C;
6591   switch (II.getIntrinsicID()) {
6592   case Intrinsic::ctpop:
6593   case Intrinsic::ctlz:
6594   case Intrinsic::cttz:
6595     // Maximum of set/clear bits is the bit width.
6596     assert(Lower == 0 && "Expected lower bound to be zero");
6597     Upper = Width + 1;
6598     break;
6599   case Intrinsic::uadd_sat:
6600     // uadd.sat(x, C) produces [C, UINT_MAX].
6601     if (match(II.getOperand(0), m_APInt(C)) ||
6602         match(II.getOperand(1), m_APInt(C)))
6603       Lower = *C;
6604     break;
6605   case Intrinsic::sadd_sat:
6606     if (match(II.getOperand(0), m_APInt(C)) ||
6607         match(II.getOperand(1), m_APInt(C))) {
6608       if (C->isNegative()) {
6609         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6610         Lower = APInt::getSignedMinValue(Width);
6611         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6612       } else {
6613         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6614         Lower = APInt::getSignedMinValue(Width) + *C;
6615         Upper = APInt::getSignedMaxValue(Width) + 1;
6616       }
6617     }
6618     break;
6619   case Intrinsic::usub_sat:
6620     // usub.sat(C, x) produces [0, C].
6621     if (match(II.getOperand(0), m_APInt(C)))
6622       Upper = *C + 1;
6623     // usub.sat(x, C) produces [0, UINT_MAX - C].
6624     else if (match(II.getOperand(1), m_APInt(C)))
6625       Upper = APInt::getMaxValue(Width) - *C + 1;
6626     break;
6627   case Intrinsic::ssub_sat:
6628     if (match(II.getOperand(0), m_APInt(C))) {
6629       if (C->isNegative()) {
6630         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6631         Lower = APInt::getSignedMinValue(Width);
6632         Upper = *C - APInt::getSignedMinValue(Width) + 1;
6633       } else {
6634         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6635         Lower = *C - APInt::getSignedMaxValue(Width);
6636         Upper = APInt::getSignedMaxValue(Width) + 1;
6637       }
6638     } else if (match(II.getOperand(1), m_APInt(C))) {
6639       if (C->isNegative()) {
6640         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6641         Lower = APInt::getSignedMinValue(Width) - *C;
6642         Upper = APInt::getSignedMaxValue(Width) + 1;
6643       } else {
6644         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6645         Lower = APInt::getSignedMinValue(Width);
6646         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6647       }
6648     }
6649     break;
6650   case Intrinsic::umin:
6651   case Intrinsic::umax:
6652   case Intrinsic::smin:
6653   case Intrinsic::smax:
6654     if (!match(II.getOperand(0), m_APInt(C)) &&
6655         !match(II.getOperand(1), m_APInt(C)))
6656       break;
6657 
6658     switch (II.getIntrinsicID()) {
6659     case Intrinsic::umin:
6660       Upper = *C + 1;
6661       break;
6662     case Intrinsic::umax:
6663       Lower = *C;
6664       break;
6665     case Intrinsic::smin:
6666       Lower = APInt::getSignedMinValue(Width);
6667       Upper = *C + 1;
6668       break;
6669     case Intrinsic::smax:
6670       Lower = *C;
6671       Upper = APInt::getSignedMaxValue(Width) + 1;
6672       break;
6673     default:
6674       llvm_unreachable("Must be min/max intrinsic");
6675     }
6676     break;
6677   case Intrinsic::abs:
6678     // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6679     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6680     if (match(II.getOperand(1), m_One()))
6681       Upper = APInt::getSignedMaxValue(Width) + 1;
6682     else
6683       Upper = APInt::getSignedMinValue(Width) + 1;
6684     break;
6685   default:
6686     break;
6687   }
6688 }
6689 
6690 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6691                                       APInt &Upper, const InstrInfoQuery &IIQ) {
6692   const Value *LHS = nullptr, *RHS = nullptr;
6693   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6694   if (R.Flavor == SPF_UNKNOWN)
6695     return;
6696 
6697   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6698 
6699   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6700     // If the negation part of the abs (in RHS) has the NSW flag,
6701     // then the result of abs(X) is [0..SIGNED_MAX],
6702     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6703     Lower = APInt::getNullValue(BitWidth);
6704     if (match(RHS, m_Neg(m_Specific(LHS))) &&
6705         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6706       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6707     else
6708       Upper = APInt::getSignedMinValue(BitWidth) + 1;
6709     return;
6710   }
6711 
6712   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6713     // The result of -abs(X) is <= 0.
6714     Lower = APInt::getSignedMinValue(BitWidth);
6715     Upper = APInt(BitWidth, 1);
6716     return;
6717   }
6718 
6719   const APInt *C;
6720   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6721     return;
6722 
6723   switch (R.Flavor) {
6724     case SPF_UMIN:
6725       Upper = *C + 1;
6726       break;
6727     case SPF_UMAX:
6728       Lower = *C;
6729       break;
6730     case SPF_SMIN:
6731       Lower = APInt::getSignedMinValue(BitWidth);
6732       Upper = *C + 1;
6733       break;
6734     case SPF_SMAX:
6735       Lower = *C;
6736       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6737       break;
6738     default:
6739       break;
6740   }
6741 }
6742 
6743 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6744                                          AssumptionCache *AC,
6745                                          const Instruction *CtxI,
6746                                          unsigned Depth) {
6747   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6748 
6749   if (Depth == MaxAnalysisRecursionDepth)
6750     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6751 
6752   const APInt *C;
6753   if (match(V, m_APInt(C)))
6754     return ConstantRange(*C);
6755 
6756   InstrInfoQuery IIQ(UseInstrInfo);
6757   unsigned BitWidth = V->getType()->getScalarSizeInBits();
6758   APInt Lower = APInt(BitWidth, 0);
6759   APInt Upper = APInt(BitWidth, 0);
6760   if (auto *BO = dyn_cast<BinaryOperator>(V))
6761     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6762   else if (auto *II = dyn_cast<IntrinsicInst>(V))
6763     setLimitsForIntrinsic(*II, Lower, Upper);
6764   else if (auto *SI = dyn_cast<SelectInst>(V))
6765     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6766 
6767   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6768 
6769   if (auto *I = dyn_cast<Instruction>(V))
6770     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6771       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6772 
6773   if (CtxI && AC) {
6774     // Try to restrict the range based on information from assumptions.
6775     for (auto &AssumeVH : AC->assumptionsFor(V)) {
6776       if (!AssumeVH)
6777         continue;
6778       CallInst *I = cast<CallInst>(AssumeVH);
6779       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6780              "Got assumption for the wrong function!");
6781       assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6782              "must be an assume intrinsic");
6783 
6784       if (!isValidAssumeForContext(I, CtxI, nullptr))
6785         continue;
6786       Value *Arg = I->getArgOperand(0);
6787       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6788       // Currently we just use information from comparisons.
6789       if (!Cmp || Cmp->getOperand(0) != V)
6790         continue;
6791       ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6792                                                AC, I, Depth + 1);
6793       CR = CR.intersectWith(
6794           ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6795     }
6796   }
6797 
6798   return CR;
6799 }
6800 
6801 static Optional<int64_t>
6802 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6803   // Skip over the first indices.
6804   gep_type_iterator GTI = gep_type_begin(GEP);
6805   for (unsigned i = 1; i != Idx; ++i, ++GTI)
6806     /*skip along*/;
6807 
6808   // Compute the offset implied by the rest of the indices.
6809   int64_t Offset = 0;
6810   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
6811     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
6812     if (!OpC)
6813       return None;
6814     if (OpC->isZero())
6815       continue; // No offset.
6816 
6817     // Handle struct indices, which add their field offset to the pointer.
6818     if (StructType *STy = GTI.getStructTypeOrNull()) {
6819       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
6820       continue;
6821     }
6822 
6823     // Otherwise, we have a sequential type like an array or fixed-length
6824     // vector. Multiply the index by the ElementSize.
6825     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
6826     if (Size.isScalable())
6827       return None;
6828     Offset += Size.getFixedSize() * OpC->getSExtValue();
6829   }
6830 
6831   return Offset;
6832 }
6833 
6834 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
6835                                         const DataLayout &DL) {
6836   Ptr1 = Ptr1->stripPointerCasts();
6837   Ptr2 = Ptr2->stripPointerCasts();
6838 
6839   // Handle the trivial case first.
6840   if (Ptr1 == Ptr2) {
6841     return 0;
6842   }
6843 
6844   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
6845   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
6846 
6847   // If one pointer is a GEP see if the GEP is a constant offset from the base,
6848   // as in "P" and "gep P, 1".
6849   // Also do this iteratively to handle the the following case:
6850   //   Ptr_t1 = GEP Ptr1, c1
6851   //   Ptr_t2 = GEP Ptr_t1, c2
6852   //   Ptr2 = GEP Ptr_t2, c3
6853   // where we will return c1+c2+c3.
6854   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
6855   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
6856   // are the same, and return the difference between offsets.
6857   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
6858                                  const Value *Ptr) -> Optional<int64_t> {
6859     const GEPOperator *GEP_T = GEP;
6860     int64_t OffsetVal = 0;
6861     bool HasSameBase = false;
6862     while (GEP_T) {
6863       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
6864       if (!Offset)
6865         return None;
6866       OffsetVal += *Offset;
6867       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
6868       if (Op0 == Ptr) {
6869         HasSameBase = true;
6870         break;
6871       }
6872       GEP_T = dyn_cast<GEPOperator>(Op0);
6873     }
6874     if (!HasSameBase)
6875       return None;
6876     return OffsetVal;
6877   };
6878 
6879   if (GEP1) {
6880     auto Offset = getOffsetFromBase(GEP1, Ptr2);
6881     if (Offset)
6882       return -*Offset;
6883   }
6884   if (GEP2) {
6885     auto Offset = getOffsetFromBase(GEP2, Ptr1);
6886     if (Offset)
6887       return Offset;
6888   }
6889 
6890   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
6891   // base.  After that base, they may have some number of common (and
6892   // potentially variable) indices.  After that they handle some constant
6893   // offset, which determines their offset from each other.  At this point, we
6894   // handle no other case.
6895   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
6896     return None;
6897 
6898   // Skip any common indices and track the GEP types.
6899   unsigned Idx = 1;
6900   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
6901     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
6902       break;
6903 
6904   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
6905   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
6906   if (!Offset1 || !Offset2)
6907     return None;
6908   return *Offset2 - *Offset1;
6909 }
6910