1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/EHPersonalities.h"
30 #include "llvm/Analysis/GuardUtils.h"
31 #include "llvm/Analysis/InstructionSimplify.h"
32 #include "llvm/Analysis/Loads.h"
33 #include "llvm/Analysis/LoopInfo.h"
34 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/IR/Argument.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/BasicBlock.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/ConstantRange.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/DiagnosticInfo.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GetElementPtrTypeIterator.h"
47 #include "llvm/IR/GlobalAlias.h"
48 #include "llvm/IR/GlobalValue.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Intrinsics.h"
55 #include "llvm/IR/IntrinsicsAArch64.h"
56 #include "llvm/IR/IntrinsicsRISCV.h"
57 #include "llvm/IR/IntrinsicsX86.h"
58 #include "llvm/IR/LLVMContext.h"
59 #include "llvm/IR/Metadata.h"
60 #include "llvm/IR/Module.h"
61 #include "llvm/IR/Operator.h"
62 #include "llvm/IR/PatternMatch.h"
63 #include "llvm/IR/Type.h"
64 #include "llvm/IR/User.h"
65 #include "llvm/IR/Value.h"
66 #include "llvm/Support/Casting.h"
67 #include "llvm/Support/CommandLine.h"
68 #include "llvm/Support/Compiler.h"
69 #include "llvm/Support/ErrorHandling.h"
70 #include "llvm/Support/KnownBits.h"
71 #include "llvm/Support/MathExtras.h"
72 #include <algorithm>
73 #include <array>
74 #include <cassert>
75 #include <cstdint>
76 #include <iterator>
77 #include <utility>
78 
79 using namespace llvm;
80 using namespace llvm::PatternMatch;
81 
82 // Controls the number of uses of the value searched for possible
83 // dominating comparisons.
84 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
85                                               cl::Hidden, cl::init(20));
86 
87 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
88 /// returns the element type's bitwidth.
89 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
90   if (unsigned BitWidth = Ty->getScalarSizeInBits())
91     return BitWidth;
92 
93   return DL.getPointerTypeSizeInBits(Ty);
94 }
95 
96 namespace {
97 
98 // Simplifying using an assume can only be done in a particular control-flow
99 // context (the context instruction provides that context). If an assume and
100 // the context instruction are not in the same block then the DT helps in
101 // figuring out if we can use it.
102 struct Query {
103   const DataLayout &DL;
104   AssumptionCache *AC;
105   const Instruction *CxtI;
106   const DominatorTree *DT;
107 
108   // Unlike the other analyses, this may be a nullptr because not all clients
109   // provide it currently.
110   OptimizationRemarkEmitter *ORE;
111 
112   /// If true, it is safe to use metadata during simplification.
113   InstrInfoQuery IIQ;
114 
115   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
116         const DominatorTree *DT, bool UseInstrInfo,
117         OptimizationRemarkEmitter *ORE = nullptr)
118       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
119 };
120 
121 } // end anonymous namespace
122 
123 // Given the provided Value and, potentially, a context instruction, return
124 // the preferred context instruction (if any).
125 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
126   // If we've been provided with a context instruction, then use that (provided
127   // it has been inserted).
128   if (CxtI && CxtI->getParent())
129     return CxtI;
130 
131   // If the value is really an already-inserted instruction, then use that.
132   CxtI = dyn_cast<Instruction>(V);
133   if (CxtI && CxtI->getParent())
134     return CxtI;
135 
136   return nullptr;
137 }
138 
139 static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
140   // If we've been provided with a context instruction, then use that (provided
141   // it has been inserted).
142   if (CxtI && CxtI->getParent())
143     return CxtI;
144 
145   // If the value is really an already-inserted instruction, then use that.
146   CxtI = dyn_cast<Instruction>(V1);
147   if (CxtI && CxtI->getParent())
148     return CxtI;
149 
150   CxtI = dyn_cast<Instruction>(V2);
151   if (CxtI && CxtI->getParent())
152     return CxtI;
153 
154   return nullptr;
155 }
156 
157 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
158                                    const APInt &DemandedElts,
159                                    APInt &DemandedLHS, APInt &DemandedRHS) {
160   // The length of scalable vectors is unknown at compile time, thus we
161   // cannot check their values
162   if (isa<ScalableVectorType>(Shuf->getType()))
163     return false;
164 
165   int NumElts =
166       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
167   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
168   DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
169   if (DemandedElts.isNullValue())
170     return true;
171   // Simple case of a shuffle with zeroinitializer.
172   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
173     DemandedLHS.setBit(0);
174     return true;
175   }
176   for (int i = 0; i != NumMaskElts; ++i) {
177     if (!DemandedElts[i])
178       continue;
179     int M = Shuf->getMaskValue(i);
180     assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
181 
182     // For undef elements, we don't know anything about the common state of
183     // the shuffle result.
184     if (M == -1)
185       return false;
186     if (M < NumElts)
187       DemandedLHS.setBit(M % NumElts);
188     else
189       DemandedRHS.setBit(M % NumElts);
190   }
191 
192   return true;
193 }
194 
195 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
196                              KnownBits &Known, unsigned Depth, const Query &Q);
197 
198 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
199                              const Query &Q) {
200   // FIXME: We currently have no way to represent the DemandedElts of a scalable
201   // vector
202   if (isa<ScalableVectorType>(V->getType())) {
203     Known.resetAll();
204     return;
205   }
206 
207   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
208   APInt DemandedElts =
209       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
210   computeKnownBits(V, DemandedElts, Known, Depth, Q);
211 }
212 
213 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
214                             const DataLayout &DL, unsigned Depth,
215                             AssumptionCache *AC, const Instruction *CxtI,
216                             const DominatorTree *DT,
217                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
218   ::computeKnownBits(V, Known, Depth,
219                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
220 }
221 
222 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
223                             KnownBits &Known, const DataLayout &DL,
224                             unsigned Depth, AssumptionCache *AC,
225                             const Instruction *CxtI, const DominatorTree *DT,
226                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
227   ::computeKnownBits(V, DemandedElts, Known, Depth,
228                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
229 }
230 
231 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
232                                   unsigned Depth, const Query &Q);
233 
234 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
235                                   const Query &Q);
236 
237 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
238                                  unsigned Depth, AssumptionCache *AC,
239                                  const Instruction *CxtI,
240                                  const DominatorTree *DT,
241                                  OptimizationRemarkEmitter *ORE,
242                                  bool UseInstrInfo) {
243   return ::computeKnownBits(
244       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
245 }
246 
247 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
248                                  const DataLayout &DL, unsigned Depth,
249                                  AssumptionCache *AC, const Instruction *CxtI,
250                                  const DominatorTree *DT,
251                                  OptimizationRemarkEmitter *ORE,
252                                  bool UseInstrInfo) {
253   return ::computeKnownBits(
254       V, DemandedElts, Depth,
255       Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
256 }
257 
258 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
259                                const DataLayout &DL, AssumptionCache *AC,
260                                const Instruction *CxtI, const DominatorTree *DT,
261                                bool UseInstrInfo) {
262   assert(LHS->getType() == RHS->getType() &&
263          "LHS and RHS should have the same type");
264   assert(LHS->getType()->isIntOrIntVectorTy() &&
265          "LHS and RHS should be integers");
266   // Look for an inverted mask: (X & ~M) op (Y & M).
267   Value *M;
268   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
269       match(RHS, m_c_And(m_Specific(M), m_Value())))
270     return true;
271   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
272       match(LHS, m_c_And(m_Specific(M), m_Value())))
273     return true;
274   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
275   KnownBits LHSKnown(IT->getBitWidth());
276   KnownBits RHSKnown(IT->getBitWidth());
277   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
278   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
279   return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
280 }
281 
282 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
283   for (const User *U : CxtI->users()) {
284     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
285       if (IC->isEquality())
286         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
287           if (C->isNullValue())
288             continue;
289     return false;
290   }
291   return true;
292 }
293 
294 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
295                                    const Query &Q);
296 
297 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
298                                   bool OrZero, unsigned Depth,
299                                   AssumptionCache *AC, const Instruction *CxtI,
300                                   const DominatorTree *DT, bool UseInstrInfo) {
301   return ::isKnownToBeAPowerOfTwo(
302       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
303 }
304 
305 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
306                            unsigned Depth, const Query &Q);
307 
308 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
309 
310 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
311                           AssumptionCache *AC, const Instruction *CxtI,
312                           const DominatorTree *DT, bool UseInstrInfo) {
313   return ::isKnownNonZero(V, Depth,
314                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
315 }
316 
317 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
318                               unsigned Depth, AssumptionCache *AC,
319                               const Instruction *CxtI, const DominatorTree *DT,
320                               bool UseInstrInfo) {
321   KnownBits Known =
322       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
323   return Known.isNonNegative();
324 }
325 
326 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
327                            AssumptionCache *AC, const Instruction *CxtI,
328                            const DominatorTree *DT, bool UseInstrInfo) {
329   if (auto *CI = dyn_cast<ConstantInt>(V))
330     return CI->getValue().isStrictlyPositive();
331 
332   // TODO: We'd doing two recursive queries here.  We should factor this such
333   // that only a single query is needed.
334   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
335          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
336 }
337 
338 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
339                            AssumptionCache *AC, const Instruction *CxtI,
340                            const DominatorTree *DT, bool UseInstrInfo) {
341   KnownBits Known =
342       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
343   return Known.isNegative();
344 }
345 
346 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
347                             const Query &Q);
348 
349 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
350                            const DataLayout &DL, AssumptionCache *AC,
351                            const Instruction *CxtI, const DominatorTree *DT,
352                            bool UseInstrInfo) {
353   return ::isKnownNonEqual(V1, V2, 0,
354                            Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
355                                  UseInstrInfo, /*ORE=*/nullptr));
356 }
357 
358 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
359                               const Query &Q);
360 
361 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
362                              const DataLayout &DL, unsigned Depth,
363                              AssumptionCache *AC, const Instruction *CxtI,
364                              const DominatorTree *DT, bool UseInstrInfo) {
365   return ::MaskedValueIsZero(
366       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
367 }
368 
369 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
370                                    unsigned Depth, const Query &Q);
371 
372 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
373                                    const Query &Q) {
374   // FIXME: We currently have no way to represent the DemandedElts of a scalable
375   // vector
376   if (isa<ScalableVectorType>(V->getType()))
377     return 1;
378 
379   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
380   APInt DemandedElts =
381       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
382   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
383 }
384 
385 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
386                                   unsigned Depth, AssumptionCache *AC,
387                                   const Instruction *CxtI,
388                                   const DominatorTree *DT, bool UseInstrInfo) {
389   return ::ComputeNumSignBits(
390       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
391 }
392 
393 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
394                                    bool NSW, const APInt &DemandedElts,
395                                    KnownBits &KnownOut, KnownBits &Known2,
396                                    unsigned Depth, const Query &Q) {
397   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
398 
399   // If one operand is unknown and we have no nowrap information,
400   // the result will be unknown independently of the second operand.
401   if (KnownOut.isUnknown() && !NSW)
402     return;
403 
404   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
405   KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
406 }
407 
408 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
409                                 const APInt &DemandedElts, KnownBits &Known,
410                                 KnownBits &Known2, unsigned Depth,
411                                 const Query &Q) {
412   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
413   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
414 
415   bool isKnownNegative = false;
416   bool isKnownNonNegative = false;
417   // If the multiplication is known not to overflow, compute the sign bit.
418   if (NSW) {
419     if (Op0 == Op1) {
420       // The product of a number with itself is non-negative.
421       isKnownNonNegative = true;
422     } else {
423       bool isKnownNonNegativeOp1 = Known.isNonNegative();
424       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
425       bool isKnownNegativeOp1 = Known.isNegative();
426       bool isKnownNegativeOp0 = Known2.isNegative();
427       // The product of two numbers with the same sign is non-negative.
428       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
429                            (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
430       // The product of a negative number and a non-negative number is either
431       // negative or zero.
432       if (!isKnownNonNegative)
433         isKnownNegative =
434             (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
435              Known2.isNonZero()) ||
436             (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
437     }
438   }
439 
440   Known = KnownBits::mul(Known, Known2);
441 
442   // Only make use of no-wrap flags if we failed to compute the sign bit
443   // directly.  This matters if the multiplication always overflows, in
444   // which case we prefer to follow the result of the direct computation,
445   // though as the program is invoking undefined behaviour we can choose
446   // whatever we like here.
447   if (isKnownNonNegative && !Known.isNegative())
448     Known.makeNonNegative();
449   else if (isKnownNegative && !Known.isNonNegative())
450     Known.makeNegative();
451 }
452 
453 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
454                                              KnownBits &Known) {
455   unsigned BitWidth = Known.getBitWidth();
456   unsigned NumRanges = Ranges.getNumOperands() / 2;
457   assert(NumRanges >= 1);
458 
459   Known.Zero.setAllBits();
460   Known.One.setAllBits();
461 
462   for (unsigned i = 0; i < NumRanges; ++i) {
463     ConstantInt *Lower =
464         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
465     ConstantInt *Upper =
466         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
467     ConstantRange Range(Lower->getValue(), Upper->getValue());
468 
469     // The first CommonPrefixBits of all values in Range are equal.
470     unsigned CommonPrefixBits =
471         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
472     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
473     APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
474     Known.One &= UnsignedMax & Mask;
475     Known.Zero &= ~UnsignedMax & Mask;
476   }
477 }
478 
479 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
480   SmallVector<const Value *, 16> WorkSet(1, I);
481   SmallPtrSet<const Value *, 32> Visited;
482   SmallPtrSet<const Value *, 16> EphValues;
483 
484   // The instruction defining an assumption's condition itself is always
485   // considered ephemeral to that assumption (even if it has other
486   // non-ephemeral users). See r246696's test case for an example.
487   if (is_contained(I->operands(), E))
488     return true;
489 
490   while (!WorkSet.empty()) {
491     const Value *V = WorkSet.pop_back_val();
492     if (!Visited.insert(V).second)
493       continue;
494 
495     // If all uses of this value are ephemeral, then so is this value.
496     if (llvm::all_of(V->users(), [&](const User *U) {
497                                    return EphValues.count(U);
498                                  })) {
499       if (V == E)
500         return true;
501 
502       if (V == I || isSafeToSpeculativelyExecute(V)) {
503        EphValues.insert(V);
504        if (const User *U = dyn_cast<User>(V))
505          append_range(WorkSet, U->operands());
506       }
507     }
508   }
509 
510   return false;
511 }
512 
513 // Is this an intrinsic that cannot be speculated but also cannot trap?
514 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
515   if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
516     return CI->isAssumeLikeIntrinsic();
517 
518   return false;
519 }
520 
521 bool llvm::isValidAssumeForContext(const Instruction *Inv,
522                                    const Instruction *CxtI,
523                                    const DominatorTree *DT) {
524   // There are two restrictions on the use of an assume:
525   //  1. The assume must dominate the context (or the control flow must
526   //     reach the assume whenever it reaches the context).
527   //  2. The context must not be in the assume's set of ephemeral values
528   //     (otherwise we will use the assume to prove that the condition
529   //     feeding the assume is trivially true, thus causing the removal of
530   //     the assume).
531 
532   if (Inv->getParent() == CxtI->getParent()) {
533     // If Inv and CtxI are in the same block, check if the assume (Inv) is first
534     // in the BB.
535     if (Inv->comesBefore(CxtI))
536       return true;
537 
538     // Don't let an assume affect itself - this would cause the problems
539     // `isEphemeralValueOf` is trying to prevent, and it would also make
540     // the loop below go out of bounds.
541     if (Inv == CxtI)
542       return false;
543 
544     // The context comes first, but they're both in the same block.
545     // Make sure there is nothing in between that might interrupt
546     // the control flow, not even CxtI itself.
547     // We limit the scan distance between the assume and its context instruction
548     // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
549     // it can be adjusted if needed (could be turned into a cl::opt).
550     unsigned ScanLimit = 15;
551     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
552       if (!isGuaranteedToTransferExecutionToSuccessor(&*I) || --ScanLimit == 0)
553         return false;
554 
555     return !isEphemeralValueOf(Inv, CxtI);
556   }
557 
558   // Inv and CxtI are in different blocks.
559   if (DT) {
560     if (DT->dominates(Inv, CxtI))
561       return true;
562   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
563     // We don't have a DT, but this trivially dominates.
564     return true;
565   }
566 
567   return false;
568 }
569 
570 static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
571   // v u> y implies v != 0.
572   if (Pred == ICmpInst::ICMP_UGT)
573     return true;
574 
575   // Special-case v != 0 to also handle v != null.
576   if (Pred == ICmpInst::ICMP_NE)
577     return match(RHS, m_Zero());
578 
579   // All other predicates - rely on generic ConstantRange handling.
580   const APInt *C;
581   if (!match(RHS, m_APInt(C)))
582     return false;
583 
584   ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
585   return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
586 }
587 
588 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
589   // Use of assumptions is context-sensitive. If we don't have a context, we
590   // cannot use them!
591   if (!Q.AC || !Q.CxtI)
592     return false;
593 
594   if (Q.CxtI && V->getType()->isPointerTy()) {
595     SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
596     if (!NullPointerIsDefined(Q.CxtI->getFunction(),
597                               V->getType()->getPointerAddressSpace()))
598       AttrKinds.push_back(Attribute::Dereferenceable);
599 
600     if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
601       return true;
602   }
603 
604   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
605     if (!AssumeVH)
606       continue;
607     CallInst *I = cast<CallInst>(AssumeVH);
608     assert(I->getFunction() == Q.CxtI->getFunction() &&
609            "Got assumption for the wrong function!");
610 
611     // Warning: This loop can end up being somewhat performance sensitive.
612     // We're running this loop for once for each value queried resulting in a
613     // runtime of ~O(#assumes * #values).
614 
615     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
616            "must be an assume intrinsic");
617 
618     Value *RHS;
619     CmpInst::Predicate Pred;
620     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
621     if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
622       return false;
623 
624     if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
625       return true;
626   }
627 
628   return false;
629 }
630 
631 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
632                                        unsigned Depth, const Query &Q) {
633   // Use of assumptions is context-sensitive. If we don't have a context, we
634   // cannot use them!
635   if (!Q.AC || !Q.CxtI)
636     return;
637 
638   unsigned BitWidth = Known.getBitWidth();
639 
640   // Refine Known set if the pointer alignment is set by assume bundles.
641   if (V->getType()->isPointerTy()) {
642     if (RetainedKnowledge RK = getKnowledgeValidInContext(
643             V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
644       Known.Zero.setLowBits(Log2_32(RK.ArgValue));
645     }
646   }
647 
648   // Note that the patterns below need to be kept in sync with the code
649   // in AssumptionCache::updateAffectedValues.
650 
651   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
652     if (!AssumeVH)
653       continue;
654     CallInst *I = cast<CallInst>(AssumeVH);
655     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
656            "Got assumption for the wrong function!");
657 
658     // Warning: This loop can end up being somewhat performance sensitive.
659     // We're running this loop for once for each value queried resulting in a
660     // runtime of ~O(#assumes * #values).
661 
662     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
663            "must be an assume intrinsic");
664 
665     Value *Arg = I->getArgOperand(0);
666 
667     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
668       assert(BitWidth == 1 && "assume operand is not i1?");
669       Known.setAllOnes();
670       return;
671     }
672     if (match(Arg, m_Not(m_Specific(V))) &&
673         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
674       assert(BitWidth == 1 && "assume operand is not i1?");
675       Known.setAllZero();
676       return;
677     }
678 
679     // The remaining tests are all recursive, so bail out if we hit the limit.
680     if (Depth == MaxAnalysisRecursionDepth)
681       continue;
682 
683     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
684     if (!Cmp)
685       continue;
686 
687     // We are attempting to compute known bits for the operands of an assume.
688     // Do not try to use other assumptions for those recursive calls because
689     // that can lead to mutual recursion and a compile-time explosion.
690     // An example of the mutual recursion: computeKnownBits can call
691     // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
692     // and so on.
693     Query QueryNoAC = Q;
694     QueryNoAC.AC = nullptr;
695 
696     // Note that ptrtoint may change the bitwidth.
697     Value *A, *B;
698     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
699 
700     CmpInst::Predicate Pred;
701     uint64_t C;
702     switch (Cmp->getPredicate()) {
703     default:
704       break;
705     case ICmpInst::ICMP_EQ:
706       // assume(v = a)
707       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
708           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
709         KnownBits RHSKnown =
710             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
711         Known.Zero |= RHSKnown.Zero;
712         Known.One  |= RHSKnown.One;
713       // assume(v & b = a)
714       } else if (match(Cmp,
715                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
716                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
717         KnownBits RHSKnown =
718             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
719         KnownBits MaskKnown =
720             computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
721 
722         // For those bits in the mask that are known to be one, we can propagate
723         // known bits from the RHS to V.
724         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
725         Known.One  |= RHSKnown.One  & MaskKnown.One;
726       // assume(~(v & b) = a)
727       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
728                                      m_Value(A))) &&
729                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
730         KnownBits RHSKnown =
731             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
732         KnownBits MaskKnown =
733             computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
734 
735         // For those bits in the mask that are known to be one, we can propagate
736         // inverted known bits from the RHS to V.
737         Known.Zero |= RHSKnown.One  & MaskKnown.One;
738         Known.One  |= RHSKnown.Zero & MaskKnown.One;
739       // assume(v | b = a)
740       } else if (match(Cmp,
741                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
742                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
743         KnownBits RHSKnown =
744             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
745         KnownBits BKnown =
746             computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
747 
748         // For those bits in B that are known to be zero, we can propagate known
749         // bits from the RHS to V.
750         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
751         Known.One  |= RHSKnown.One  & BKnown.Zero;
752       // assume(~(v | b) = a)
753       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
754                                      m_Value(A))) &&
755                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
756         KnownBits RHSKnown =
757             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
758         KnownBits BKnown =
759             computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
760 
761         // For those bits in B that are known to be zero, we can propagate
762         // inverted known bits from the RHS to V.
763         Known.Zero |= RHSKnown.One  & BKnown.Zero;
764         Known.One  |= RHSKnown.Zero & BKnown.Zero;
765       // assume(v ^ b = a)
766       } else if (match(Cmp,
767                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
768                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
769         KnownBits RHSKnown =
770             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
771         KnownBits BKnown =
772             computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
773 
774         // For those bits in B that are known to be zero, we can propagate known
775         // bits from the RHS to V. For those bits in B that are known to be one,
776         // we can propagate inverted known bits from the RHS to V.
777         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
778         Known.One  |= RHSKnown.One  & BKnown.Zero;
779         Known.Zero |= RHSKnown.One  & BKnown.One;
780         Known.One  |= RHSKnown.Zero & BKnown.One;
781       // assume(~(v ^ b) = a)
782       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
783                                      m_Value(A))) &&
784                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
785         KnownBits RHSKnown =
786             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
787         KnownBits BKnown =
788             computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
789 
790         // For those bits in B that are known to be zero, we can propagate
791         // inverted known bits from the RHS to V. For those bits in B that are
792         // known to be one, we can propagate known bits from the RHS to V.
793         Known.Zero |= RHSKnown.One  & BKnown.Zero;
794         Known.One  |= RHSKnown.Zero & BKnown.Zero;
795         Known.Zero |= RHSKnown.Zero & BKnown.One;
796         Known.One  |= RHSKnown.One  & BKnown.One;
797       // assume(v << c = a)
798       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
799                                      m_Value(A))) &&
800                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
801         KnownBits RHSKnown =
802             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
803 
804         // For those bits in RHS that are known, we can propagate them to known
805         // bits in V shifted to the right by C.
806         RHSKnown.Zero.lshrInPlace(C);
807         Known.Zero |= RHSKnown.Zero;
808         RHSKnown.One.lshrInPlace(C);
809         Known.One  |= RHSKnown.One;
810       // assume(~(v << c) = a)
811       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
812                                      m_Value(A))) &&
813                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
814         KnownBits RHSKnown =
815             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
816         // For those bits in RHS that are known, we can propagate them inverted
817         // to known bits in V shifted to the right by C.
818         RHSKnown.One.lshrInPlace(C);
819         Known.Zero |= RHSKnown.One;
820         RHSKnown.Zero.lshrInPlace(C);
821         Known.One  |= RHSKnown.Zero;
822       // assume(v >> c = a)
823       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
824                                      m_Value(A))) &&
825                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
826         KnownBits RHSKnown =
827             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
828         // For those bits in RHS that are known, we can propagate them to known
829         // bits in V shifted to the right by C.
830         Known.Zero |= RHSKnown.Zero << C;
831         Known.One  |= RHSKnown.One  << C;
832       // assume(~(v >> c) = a)
833       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
834                                      m_Value(A))) &&
835                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
836         KnownBits RHSKnown =
837             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
838         // For those bits in RHS that are known, we can propagate them inverted
839         // to known bits in V shifted to the right by C.
840         Known.Zero |= RHSKnown.One  << C;
841         Known.One  |= RHSKnown.Zero << C;
842       }
843       break;
844     case ICmpInst::ICMP_SGE:
845       // assume(v >=_s c) where c is non-negative
846       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
847           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
848         KnownBits RHSKnown =
849             computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
850 
851         if (RHSKnown.isNonNegative()) {
852           // We know that the sign bit is zero.
853           Known.makeNonNegative();
854         }
855       }
856       break;
857     case ICmpInst::ICMP_SGT:
858       // assume(v >_s c) where c is at least -1.
859       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
860           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
861         KnownBits RHSKnown =
862             computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
863 
864         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
865           // We know that the sign bit is zero.
866           Known.makeNonNegative();
867         }
868       }
869       break;
870     case ICmpInst::ICMP_SLE:
871       // assume(v <=_s c) where c is negative
872       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
873           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
874         KnownBits RHSKnown =
875             computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
876 
877         if (RHSKnown.isNegative()) {
878           // We know that the sign bit is one.
879           Known.makeNegative();
880         }
881       }
882       break;
883     case ICmpInst::ICMP_SLT:
884       // assume(v <_s c) where c is non-positive
885       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
886           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
887         KnownBits RHSKnown =
888             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
889 
890         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
891           // We know that the sign bit is one.
892           Known.makeNegative();
893         }
894       }
895       break;
896     case ICmpInst::ICMP_ULE:
897       // assume(v <=_u c)
898       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
899           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
900         KnownBits RHSKnown =
901             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
902 
903         // Whatever high bits in c are zero are known to be zero.
904         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
905       }
906       break;
907     case ICmpInst::ICMP_ULT:
908       // assume(v <_u c)
909       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
910           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
911         KnownBits RHSKnown =
912             computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
913 
914         // If the RHS is known zero, then this assumption must be wrong (nothing
915         // is unsigned less than zero). Signal a conflict and get out of here.
916         if (RHSKnown.isZero()) {
917           Known.Zero.setAllBits();
918           Known.One.setAllBits();
919           break;
920         }
921 
922         // Whatever high bits in c are zero are known to be zero (if c is a power
923         // of 2, then one more).
924         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
925           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
926         else
927           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
928       }
929       break;
930     }
931   }
932 
933   // If assumptions conflict with each other or previous known bits, then we
934   // have a logical fallacy. It's possible that the assumption is not reachable,
935   // so this isn't a real bug. On the other hand, the program may have undefined
936   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
937   // clear out the known bits, try to warn the user, and hope for the best.
938   if (Known.Zero.intersects(Known.One)) {
939     Known.resetAll();
940 
941     if (Q.ORE)
942       Q.ORE->emit([&]() {
943         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
944         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
945                                           CxtI)
946                << "Detected conflicting code assumptions. Program may "
947                   "have undefined behavior, or compiler may have "
948                   "internal error.";
949       });
950   }
951 }
952 
953 /// Compute known bits from a shift operator, including those with a
954 /// non-constant shift amount. Known is the output of this function. Known2 is a
955 /// pre-allocated temporary with the same bit width as Known and on return
956 /// contains the known bit of the shift value source. KF is an
957 /// operator-specific function that, given the known-bits and a shift amount,
958 /// compute the implied known-bits of the shift operator's result respectively
959 /// for that shift amount. The results from calling KF are conservatively
960 /// combined for all permitted shift amounts.
961 static void computeKnownBitsFromShiftOperator(
962     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
963     KnownBits &Known2, unsigned Depth, const Query &Q,
964     function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
965   unsigned BitWidth = Known.getBitWidth();
966   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
967   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
968 
969   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
970   // BitWidth > 64 and any upper bits are known, we'll end up returning the
971   // limit value (which implies all bits are known).
972   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
973   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
974   bool ShiftAmtIsConstant = Known.isConstant();
975   bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
976 
977   if (ShiftAmtIsConstant) {
978     Known = KF(Known2, Known);
979 
980     // If the known bits conflict, this must be an overflowing left shift, so
981     // the shift result is poison. We can return anything we want. Choose 0 for
982     // the best folding opportunity.
983     if (Known.hasConflict())
984       Known.setAllZero();
985 
986     return;
987   }
988 
989   // If the shift amount could be greater than or equal to the bit-width of the
990   // LHS, the value could be poison, but bail out because the check below is
991   // expensive.
992   // TODO: Should we just carry on?
993   if (MaxShiftAmtIsOutOfRange) {
994     Known.resetAll();
995     return;
996   }
997 
998   // It would be more-clearly correct to use the two temporaries for this
999   // calculation. Reusing the APInts here to prevent unnecessary allocations.
1000   Known.resetAll();
1001 
1002   // If we know the shifter operand is nonzero, we can sometimes infer more
1003   // known bits. However this is expensive to compute, so be lazy about it and
1004   // only compute it when absolutely necessary.
1005   Optional<bool> ShifterOperandIsNonZero;
1006 
1007   // Early exit if we can't constrain any well-defined shift amount.
1008   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1009       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1010     ShifterOperandIsNonZero =
1011         isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1012     if (!*ShifterOperandIsNonZero)
1013       return;
1014   }
1015 
1016   Known.Zero.setAllBits();
1017   Known.One.setAllBits();
1018   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1019     // Combine the shifted known input bits only for those shift amounts
1020     // compatible with its known constraints.
1021     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1022       continue;
1023     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1024       continue;
1025     // If we know the shifter is nonzero, we may be able to infer more known
1026     // bits. This check is sunk down as far as possible to avoid the expensive
1027     // call to isKnownNonZero if the cheaper checks above fail.
1028     if (ShiftAmt == 0) {
1029       if (!ShifterOperandIsNonZero.hasValue())
1030         ShifterOperandIsNonZero =
1031             isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1032       if (*ShifterOperandIsNonZero)
1033         continue;
1034     }
1035 
1036     Known = KnownBits::commonBits(
1037         Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1038   }
1039 
1040   // If the known bits conflict, the result is poison. Return a 0 and hope the
1041   // caller can further optimize that.
1042   if (Known.hasConflict())
1043     Known.setAllZero();
1044 }
1045 
1046 static void computeKnownBitsFromOperator(const Operator *I,
1047                                          const APInt &DemandedElts,
1048                                          KnownBits &Known, unsigned Depth,
1049                                          const Query &Q) {
1050   unsigned BitWidth = Known.getBitWidth();
1051 
1052   KnownBits Known2(BitWidth);
1053   switch (I->getOpcode()) {
1054   default: break;
1055   case Instruction::Load:
1056     if (MDNode *MD =
1057             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1058       computeKnownBitsFromRangeMetadata(*MD, Known);
1059     break;
1060   case Instruction::And: {
1061     // If either the LHS or the RHS are Zero, the result is zero.
1062     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1063     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1064 
1065     Known &= Known2;
1066 
1067     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1068     // here we handle the more general case of adding any odd number by
1069     // matching the form add(x, add(x, y)) where y is odd.
1070     // TODO: This could be generalized to clearing any bit set in y where the
1071     // following bit is known to be unset in y.
1072     Value *X = nullptr, *Y = nullptr;
1073     if (!Known.Zero[0] && !Known.One[0] &&
1074         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1075       Known2.resetAll();
1076       computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1077       if (Known2.countMinTrailingOnes() > 0)
1078         Known.Zero.setBit(0);
1079     }
1080     break;
1081   }
1082   case Instruction::Or:
1083     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1084     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1085 
1086     Known |= Known2;
1087     break;
1088   case Instruction::Xor:
1089     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1090     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1091 
1092     Known ^= Known2;
1093     break;
1094   case Instruction::Mul: {
1095     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1096     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1097                         Known, Known2, Depth, Q);
1098     break;
1099   }
1100   case Instruction::UDiv: {
1101     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1102     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1103     Known = KnownBits::udiv(Known, Known2);
1104     break;
1105   }
1106   case Instruction::Select: {
1107     const Value *LHS = nullptr, *RHS = nullptr;
1108     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1109     if (SelectPatternResult::isMinOrMax(SPF)) {
1110       computeKnownBits(RHS, Known, Depth + 1, Q);
1111       computeKnownBits(LHS, Known2, Depth + 1, Q);
1112       switch (SPF) {
1113       default:
1114         llvm_unreachable("Unhandled select pattern flavor!");
1115       case SPF_SMAX:
1116         Known = KnownBits::smax(Known, Known2);
1117         break;
1118       case SPF_SMIN:
1119         Known = KnownBits::smin(Known, Known2);
1120         break;
1121       case SPF_UMAX:
1122         Known = KnownBits::umax(Known, Known2);
1123         break;
1124       case SPF_UMIN:
1125         Known = KnownBits::umin(Known, Known2);
1126         break;
1127       }
1128       break;
1129     }
1130 
1131     computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1132     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1133 
1134     // Only known if known in both the LHS and RHS.
1135     Known = KnownBits::commonBits(Known, Known2);
1136 
1137     if (SPF == SPF_ABS) {
1138       // RHS from matchSelectPattern returns the negation part of abs pattern.
1139       // If the negate has an NSW flag we can assume the sign bit of the result
1140       // will be 0 because that makes abs(INT_MIN) undefined.
1141       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1142           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1143         Known.Zero.setSignBit();
1144     }
1145 
1146     break;
1147   }
1148   case Instruction::FPTrunc:
1149   case Instruction::FPExt:
1150   case Instruction::FPToUI:
1151   case Instruction::FPToSI:
1152   case Instruction::SIToFP:
1153   case Instruction::UIToFP:
1154     break; // Can't work with floating point.
1155   case Instruction::PtrToInt:
1156   case Instruction::IntToPtr:
1157     // Fall through and handle them the same as zext/trunc.
1158     LLVM_FALLTHROUGH;
1159   case Instruction::ZExt:
1160   case Instruction::Trunc: {
1161     Type *SrcTy = I->getOperand(0)->getType();
1162 
1163     unsigned SrcBitWidth;
1164     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1165     // which fall through here.
1166     Type *ScalarTy = SrcTy->getScalarType();
1167     SrcBitWidth = ScalarTy->isPointerTy() ?
1168       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1169       Q.DL.getTypeSizeInBits(ScalarTy);
1170 
1171     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1172     Known = Known.anyextOrTrunc(SrcBitWidth);
1173     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1174     Known = Known.zextOrTrunc(BitWidth);
1175     break;
1176   }
1177   case Instruction::BitCast: {
1178     Type *SrcTy = I->getOperand(0)->getType();
1179     if (SrcTy->isIntOrPtrTy() &&
1180         // TODO: For now, not handling conversions like:
1181         // (bitcast i64 %x to <2 x i32>)
1182         !I->getType()->isVectorTy()) {
1183       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1184       break;
1185     }
1186 
1187     // Handle cast from vector integer type to scalar or vector integer.
1188     auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1189     if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1190         !I->getType()->isIntOrIntVectorTy())
1191       break;
1192 
1193     // Look through a cast from narrow vector elements to wider type.
1194     // Examples: v4i32 -> v2i64, v3i8 -> v24
1195     unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1196     if (BitWidth % SubBitWidth == 0) {
1197       // Known bits are automatically intersected across demanded elements of a
1198       // vector. So for example, if a bit is computed as known zero, it must be
1199       // zero across all demanded elements of the vector.
1200       //
1201       // For this bitcast, each demanded element of the output is sub-divided
1202       // across a set of smaller vector elements in the source vector. To get
1203       // the known bits for an entire element of the output, compute the known
1204       // bits for each sub-element sequentially. This is done by shifting the
1205       // one-set-bit demanded elements parameter across the sub-elements for
1206       // consecutive calls to computeKnownBits. We are using the demanded
1207       // elements parameter as a mask operator.
1208       //
1209       // The known bits of each sub-element are then inserted into place
1210       // (dependent on endian) to form the full result of known bits.
1211       unsigned NumElts = DemandedElts.getBitWidth();
1212       unsigned SubScale = BitWidth / SubBitWidth;
1213       APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
1214       for (unsigned i = 0; i != NumElts; ++i) {
1215         if (DemandedElts[i])
1216           SubDemandedElts.setBit(i * SubScale);
1217       }
1218 
1219       KnownBits KnownSrc(SubBitWidth);
1220       for (unsigned i = 0; i != SubScale; ++i) {
1221         computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
1222                          Depth + 1, Q);
1223         unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
1224         Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
1225       }
1226     }
1227     break;
1228   }
1229   case Instruction::SExt: {
1230     // Compute the bits in the result that are not present in the input.
1231     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1232 
1233     Known = Known.trunc(SrcBitWidth);
1234     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1235     // If the sign bit of the input is known set or clear, then we know the
1236     // top bits of the result.
1237     Known = Known.sext(BitWidth);
1238     break;
1239   }
1240   case Instruction::Shl: {
1241     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1242     auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1243       KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1244       // If this shift has "nsw" keyword, then the result is either a poison
1245       // value or has the same sign bit as the first operand.
1246       if (NSW) {
1247         if (KnownVal.Zero.isSignBitSet())
1248           Result.Zero.setSignBit();
1249         if (KnownVal.One.isSignBitSet())
1250           Result.One.setSignBit();
1251       }
1252       return Result;
1253     };
1254     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1255                                       KF);
1256     // Trailing zeros of a right-shifted constant never decrease.
1257     const APInt *C;
1258     if (match(I->getOperand(0), m_APInt(C)))
1259       Known.Zero.setLowBits(C->countTrailingZeros());
1260     break;
1261   }
1262   case Instruction::LShr: {
1263     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1264       return KnownBits::lshr(KnownVal, KnownAmt);
1265     };
1266     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1267                                       KF);
1268     // Leading zeros of a left-shifted constant never decrease.
1269     const APInt *C;
1270     if (match(I->getOperand(0), m_APInt(C)))
1271       Known.Zero.setHighBits(C->countLeadingZeros());
1272     break;
1273   }
1274   case Instruction::AShr: {
1275     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1276       return KnownBits::ashr(KnownVal, KnownAmt);
1277     };
1278     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1279                                       KF);
1280     break;
1281   }
1282   case Instruction::Sub: {
1283     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1284     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1285                            DemandedElts, Known, Known2, Depth, Q);
1286     break;
1287   }
1288   case Instruction::Add: {
1289     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1290     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1291                            DemandedElts, Known, Known2, Depth, Q);
1292     break;
1293   }
1294   case Instruction::SRem:
1295     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1296     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1297     Known = KnownBits::srem(Known, Known2);
1298     break;
1299 
1300   case Instruction::URem:
1301     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1302     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1303     Known = KnownBits::urem(Known, Known2);
1304     break;
1305   case Instruction::Alloca:
1306     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1307     break;
1308   case Instruction::GetElementPtr: {
1309     // Analyze all of the subscripts of this getelementptr instruction
1310     // to determine if we can prove known low zero bits.
1311     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1312     // Accumulate the constant indices in a separate variable
1313     // to minimize the number of calls to computeForAddSub.
1314     APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1315 
1316     gep_type_iterator GTI = gep_type_begin(I);
1317     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1318       // TrailZ can only become smaller, short-circuit if we hit zero.
1319       if (Known.isUnknown())
1320         break;
1321 
1322       Value *Index = I->getOperand(i);
1323 
1324       // Handle case when index is zero.
1325       Constant *CIndex = dyn_cast<Constant>(Index);
1326       if (CIndex && CIndex->isZeroValue())
1327         continue;
1328 
1329       if (StructType *STy = GTI.getStructTypeOrNull()) {
1330         // Handle struct member offset arithmetic.
1331 
1332         assert(CIndex &&
1333                "Access to structure field must be known at compile time");
1334 
1335         if (CIndex->getType()->isVectorTy())
1336           Index = CIndex->getSplatValue();
1337 
1338         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1339         const StructLayout *SL = Q.DL.getStructLayout(STy);
1340         uint64_t Offset = SL->getElementOffset(Idx);
1341         AccConstIndices += Offset;
1342         continue;
1343       }
1344 
1345       // Handle array index arithmetic.
1346       Type *IndexedTy = GTI.getIndexedType();
1347       if (!IndexedTy->isSized()) {
1348         Known.resetAll();
1349         break;
1350       }
1351 
1352       unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1353       KnownBits IndexBits(IndexBitWidth);
1354       computeKnownBits(Index, IndexBits, Depth + 1, Q);
1355       TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1356       uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1357       KnownBits ScalingFactor(IndexBitWidth);
1358       // Multiply by current sizeof type.
1359       // &A[i] == A + i * sizeof(*A[i]).
1360       if (IndexTypeSize.isScalable()) {
1361         // For scalable types the only thing we know about sizeof is
1362         // that this is a multiple of the minimum size.
1363         ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1364       } else if (IndexBits.isConstant()) {
1365         APInt IndexConst = IndexBits.getConstant();
1366         APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1367         IndexConst *= ScalingFactor;
1368         AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1369         continue;
1370       } else {
1371         ScalingFactor =
1372             KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1373       }
1374       IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1375 
1376       // If the offsets have a different width from the pointer, according
1377       // to the language reference we need to sign-extend or truncate them
1378       // to the width of the pointer.
1379       IndexBits = IndexBits.sextOrTrunc(BitWidth);
1380 
1381       // Note that inbounds does *not* guarantee nsw for the addition, as only
1382       // the offset is signed, while the base address is unsigned.
1383       Known = KnownBits::computeForAddSub(
1384           /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1385     }
1386     if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
1387       KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1388       Known = KnownBits::computeForAddSub(
1389           /*Add=*/true, /*NSW=*/false, Known, Index);
1390     }
1391     break;
1392   }
1393   case Instruction::PHI: {
1394     const PHINode *P = cast<PHINode>(I);
1395     BinaryOperator *BO = nullptr;
1396     Value *R = nullptr, *L = nullptr;
1397     if (matchSimpleRecurrence(P, BO, R, L)) {
1398       // Handle the case of a simple two-predecessor recurrence PHI.
1399       // There's a lot more that could theoretically be done here, but
1400       // this is sufficient to catch some interesting cases.
1401       unsigned Opcode = BO->getOpcode();
1402 
1403       // If this is a shift recurrence, we know the bits being shifted in.
1404       // We can combine that with information about the start value of the
1405       // recurrence to conclude facts about the result.
1406       if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1407            Opcode == Instruction::Shl) &&
1408           BO->getOperand(0) == I) {
1409 
1410         // We have matched a recurrence of the form:
1411         // %iv = [R, %entry], [%iv.next, %backedge]
1412         // %iv.next = shift_op %iv, L
1413 
1414         // Recurse with the phi context to avoid concern about whether facts
1415         // inferred hold at original context instruction.  TODO: It may be
1416         // correct to use the original context.  IF warranted, explore and
1417         // add sufficient tests to cover.
1418         Query RecQ = Q;
1419         RecQ.CxtI = P;
1420         computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1421         switch (Opcode) {
1422         case Instruction::Shl:
1423           // A shl recurrence will only increase the tailing zeros
1424           Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1425           break;
1426         case Instruction::LShr:
1427           // A lshr recurrence will preserve the leading zeros of the
1428           // start value
1429           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1430           break;
1431         case Instruction::AShr:
1432           // An ashr recurrence will extend the initial sign bit
1433           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1434           Known.One.setHighBits(Known2.countMinLeadingOnes());
1435           break;
1436         };
1437       }
1438 
1439       // Check for operations that have the property that if
1440       // both their operands have low zero bits, the result
1441       // will have low zero bits.
1442       if (Opcode == Instruction::Add ||
1443           Opcode == Instruction::Sub ||
1444           Opcode == Instruction::And ||
1445           Opcode == Instruction::Or ||
1446           Opcode == Instruction::Mul) {
1447         // Change the context instruction to the "edge" that flows into the
1448         // phi. This is important because that is where the value is actually
1449         // "evaluated" even though it is used later somewhere else. (see also
1450         // D69571).
1451         Query RecQ = Q;
1452 
1453         unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1454         Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1455         Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1456 
1457         // Ok, we have a PHI of the form L op= R. Check for low
1458         // zero bits.
1459         RecQ.CxtI = RInst;
1460         computeKnownBits(R, Known2, Depth + 1, RecQ);
1461 
1462         // We need to take the minimum number of known bits
1463         KnownBits Known3(BitWidth);
1464         RecQ.CxtI = LInst;
1465         computeKnownBits(L, Known3, Depth + 1, RecQ);
1466 
1467         Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1468                                        Known3.countMinTrailingZeros()));
1469 
1470         auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1471         if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1472           // If initial value of recurrence is nonnegative, and we are adding
1473           // a nonnegative number with nsw, the result can only be nonnegative
1474           // or poison value regardless of the number of times we execute the
1475           // add in phi recurrence. If initial value is negative and we are
1476           // adding a negative number with nsw, the result can only be
1477           // negative or poison value. Similar arguments apply to sub and mul.
1478           //
1479           // (add non-negative, non-negative) --> non-negative
1480           // (add negative, negative) --> negative
1481           if (Opcode == Instruction::Add) {
1482             if (Known2.isNonNegative() && Known3.isNonNegative())
1483               Known.makeNonNegative();
1484             else if (Known2.isNegative() && Known3.isNegative())
1485               Known.makeNegative();
1486           }
1487 
1488           // (sub nsw non-negative, negative) --> non-negative
1489           // (sub nsw negative, non-negative) --> negative
1490           else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1491             if (Known2.isNonNegative() && Known3.isNegative())
1492               Known.makeNonNegative();
1493             else if (Known2.isNegative() && Known3.isNonNegative())
1494               Known.makeNegative();
1495           }
1496 
1497           // (mul nsw non-negative, non-negative) --> non-negative
1498           else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1499                    Known3.isNonNegative())
1500             Known.makeNonNegative();
1501         }
1502 
1503         break;
1504       }
1505     }
1506 
1507     // Unreachable blocks may have zero-operand PHI nodes.
1508     if (P->getNumIncomingValues() == 0)
1509       break;
1510 
1511     // Otherwise take the unions of the known bit sets of the operands,
1512     // taking conservative care to avoid excessive recursion.
1513     if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1514       // Skip if every incoming value references to ourself.
1515       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1516         break;
1517 
1518       Known.Zero.setAllBits();
1519       Known.One.setAllBits();
1520       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1521         Value *IncValue = P->getIncomingValue(u);
1522         // Skip direct self references.
1523         if (IncValue == P) continue;
1524 
1525         // Change the context instruction to the "edge" that flows into the
1526         // phi. This is important because that is where the value is actually
1527         // "evaluated" even though it is used later somewhere else. (see also
1528         // D69571).
1529         Query RecQ = Q;
1530         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1531 
1532         Known2 = KnownBits(BitWidth);
1533         // Recurse, but cap the recursion to one level, because we don't
1534         // want to waste time spinning around in loops.
1535         computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1536         Known = KnownBits::commonBits(Known, Known2);
1537         // If all bits have been ruled out, there's no need to check
1538         // more operands.
1539         if (Known.isUnknown())
1540           break;
1541       }
1542     }
1543     break;
1544   }
1545   case Instruction::Call:
1546   case Instruction::Invoke:
1547     // If range metadata is attached to this call, set known bits from that,
1548     // and then intersect with known bits based on other properties of the
1549     // function.
1550     if (MDNode *MD =
1551             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1552       computeKnownBitsFromRangeMetadata(*MD, Known);
1553     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1554       computeKnownBits(RV, Known2, Depth + 1, Q);
1555       Known.Zero |= Known2.Zero;
1556       Known.One |= Known2.One;
1557     }
1558     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1559       switch (II->getIntrinsicID()) {
1560       default: break;
1561       case Intrinsic::abs: {
1562         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1563         bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1564         Known = Known2.abs(IntMinIsPoison);
1565         break;
1566       }
1567       case Intrinsic::bitreverse:
1568         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1569         Known.Zero |= Known2.Zero.reverseBits();
1570         Known.One |= Known2.One.reverseBits();
1571         break;
1572       case Intrinsic::bswap:
1573         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1574         Known.Zero |= Known2.Zero.byteSwap();
1575         Known.One |= Known2.One.byteSwap();
1576         break;
1577       case Intrinsic::ctlz: {
1578         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1579         // If we have a known 1, its position is our upper bound.
1580         unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1581         // If this call is undefined for 0, the result will be less than 2^n.
1582         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1583           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1584         unsigned LowBits = Log2_32(PossibleLZ)+1;
1585         Known.Zero.setBitsFrom(LowBits);
1586         break;
1587       }
1588       case Intrinsic::cttz: {
1589         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1590         // If we have a known 1, its position is our upper bound.
1591         unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1592         // If this call is undefined for 0, the result will be less than 2^n.
1593         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1594           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1595         unsigned LowBits = Log2_32(PossibleTZ)+1;
1596         Known.Zero.setBitsFrom(LowBits);
1597         break;
1598       }
1599       case Intrinsic::ctpop: {
1600         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1601         // We can bound the space the count needs.  Also, bits known to be zero
1602         // can't contribute to the population.
1603         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1604         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1605         Known.Zero.setBitsFrom(LowBits);
1606         // TODO: we could bound KnownOne using the lower bound on the number
1607         // of bits which might be set provided by popcnt KnownOne2.
1608         break;
1609       }
1610       case Intrinsic::fshr:
1611       case Intrinsic::fshl: {
1612         const APInt *SA;
1613         if (!match(I->getOperand(2), m_APInt(SA)))
1614           break;
1615 
1616         // Normalize to funnel shift left.
1617         uint64_t ShiftAmt = SA->urem(BitWidth);
1618         if (II->getIntrinsicID() == Intrinsic::fshr)
1619           ShiftAmt = BitWidth - ShiftAmt;
1620 
1621         KnownBits Known3(BitWidth);
1622         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1623         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1624 
1625         Known.Zero =
1626             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1627         Known.One =
1628             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1629         break;
1630       }
1631       case Intrinsic::uadd_sat:
1632       case Intrinsic::usub_sat: {
1633         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1634         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1635         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1636 
1637         // Add: Leading ones of either operand are preserved.
1638         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1639         // as leading zeros in the result.
1640         unsigned LeadingKnown;
1641         if (IsAdd)
1642           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1643                                   Known2.countMinLeadingOnes());
1644         else
1645           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1646                                   Known2.countMinLeadingOnes());
1647 
1648         Known = KnownBits::computeForAddSub(
1649             IsAdd, /* NSW */ false, Known, Known2);
1650 
1651         // We select between the operation result and all-ones/zero
1652         // respectively, so we can preserve known ones/zeros.
1653         if (IsAdd) {
1654           Known.One.setHighBits(LeadingKnown);
1655           Known.Zero.clearAllBits();
1656         } else {
1657           Known.Zero.setHighBits(LeadingKnown);
1658           Known.One.clearAllBits();
1659         }
1660         break;
1661       }
1662       case Intrinsic::umin:
1663         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1664         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1665         Known = KnownBits::umin(Known, Known2);
1666         break;
1667       case Intrinsic::umax:
1668         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1669         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1670         Known = KnownBits::umax(Known, Known2);
1671         break;
1672       case Intrinsic::smin:
1673         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1674         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1675         Known = KnownBits::smin(Known, Known2);
1676         break;
1677       case Intrinsic::smax:
1678         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1679         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1680         Known = KnownBits::smax(Known, Known2);
1681         break;
1682       case Intrinsic::x86_sse42_crc32_64_64:
1683         Known.Zero.setBitsFrom(32);
1684         break;
1685       case Intrinsic::riscv_vsetvli:
1686       case Intrinsic::riscv_vsetvlimax:
1687         // Assume that VL output is positive and would fit in an int32_t.
1688         // TODO: VLEN might be capped at 16 bits in a future V spec update.
1689         if (BitWidth >= 32)
1690           Known.Zero.setBitsFrom(31);
1691         break;
1692       }
1693     }
1694     break;
1695   case Instruction::ShuffleVector: {
1696     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1697     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1698     if (!Shuf) {
1699       Known.resetAll();
1700       return;
1701     }
1702     // For undef elements, we don't know anything about the common state of
1703     // the shuffle result.
1704     APInt DemandedLHS, DemandedRHS;
1705     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1706       Known.resetAll();
1707       return;
1708     }
1709     Known.One.setAllBits();
1710     Known.Zero.setAllBits();
1711     if (!!DemandedLHS) {
1712       const Value *LHS = Shuf->getOperand(0);
1713       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1714       // If we don't know any bits, early out.
1715       if (Known.isUnknown())
1716         break;
1717     }
1718     if (!!DemandedRHS) {
1719       const Value *RHS = Shuf->getOperand(1);
1720       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1721       Known = KnownBits::commonBits(Known, Known2);
1722     }
1723     break;
1724   }
1725   case Instruction::InsertElement: {
1726     const Value *Vec = I->getOperand(0);
1727     const Value *Elt = I->getOperand(1);
1728     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1729     // Early out if the index is non-constant or out-of-range.
1730     unsigned NumElts = DemandedElts.getBitWidth();
1731     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1732       Known.resetAll();
1733       return;
1734     }
1735     Known.One.setAllBits();
1736     Known.Zero.setAllBits();
1737     unsigned EltIdx = CIdx->getZExtValue();
1738     // Do we demand the inserted element?
1739     if (DemandedElts[EltIdx]) {
1740       computeKnownBits(Elt, Known, Depth + 1, Q);
1741       // If we don't know any bits, early out.
1742       if (Known.isUnknown())
1743         break;
1744     }
1745     // We don't need the base vector element that has been inserted.
1746     APInt DemandedVecElts = DemandedElts;
1747     DemandedVecElts.clearBit(EltIdx);
1748     if (!!DemandedVecElts) {
1749       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1750       Known = KnownBits::commonBits(Known, Known2);
1751     }
1752     break;
1753   }
1754   case Instruction::ExtractElement: {
1755     // Look through extract element. If the index is non-constant or
1756     // out-of-range demand all elements, otherwise just the extracted element.
1757     const Value *Vec = I->getOperand(0);
1758     const Value *Idx = I->getOperand(1);
1759     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1760     if (isa<ScalableVectorType>(Vec->getType())) {
1761       // FIXME: there's probably *something* we can do with scalable vectors
1762       Known.resetAll();
1763       break;
1764     }
1765     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1766     APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1767     if (CIdx && CIdx->getValue().ult(NumElts))
1768       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1769     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1770     break;
1771   }
1772   case Instruction::ExtractValue:
1773     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1774       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1775       if (EVI->getNumIndices() != 1) break;
1776       if (EVI->getIndices()[0] == 0) {
1777         switch (II->getIntrinsicID()) {
1778         default: break;
1779         case Intrinsic::uadd_with_overflow:
1780         case Intrinsic::sadd_with_overflow:
1781           computeKnownBitsAddSub(true, II->getArgOperand(0),
1782                                  II->getArgOperand(1), false, DemandedElts,
1783                                  Known, Known2, Depth, Q);
1784           break;
1785         case Intrinsic::usub_with_overflow:
1786         case Intrinsic::ssub_with_overflow:
1787           computeKnownBitsAddSub(false, II->getArgOperand(0),
1788                                  II->getArgOperand(1), false, DemandedElts,
1789                                  Known, Known2, Depth, Q);
1790           break;
1791         case Intrinsic::umul_with_overflow:
1792         case Intrinsic::smul_with_overflow:
1793           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1794                               DemandedElts, Known, Known2, Depth, Q);
1795           break;
1796         }
1797       }
1798     }
1799     break;
1800   case Instruction::Freeze:
1801     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1802                                   Depth + 1))
1803       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1804     break;
1805   }
1806 }
1807 
1808 /// Determine which bits of V are known to be either zero or one and return
1809 /// them.
1810 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1811                            unsigned Depth, const Query &Q) {
1812   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1813   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1814   return Known;
1815 }
1816 
1817 /// Determine which bits of V are known to be either zero or one and return
1818 /// them.
1819 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1820   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1821   computeKnownBits(V, Known, Depth, Q);
1822   return Known;
1823 }
1824 
1825 /// Determine which bits of V are known to be either zero or one and return
1826 /// them in the Known bit set.
1827 ///
1828 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1829 /// we cannot optimize based on the assumption that it is zero without changing
1830 /// it to be an explicit zero.  If we don't change it to zero, other code could
1831 /// optimized based on the contradictory assumption that it is non-zero.
1832 /// Because instcombine aggressively folds operations with undef args anyway,
1833 /// this won't lose us code quality.
1834 ///
1835 /// This function is defined on values with integer type, values with pointer
1836 /// type, and vectors of integers.  In the case
1837 /// where V is a vector, known zero, and known one values are the
1838 /// same width as the vector element, and the bit is set only if it is true
1839 /// for all of the demanded elements in the vector specified by DemandedElts.
1840 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1841                       KnownBits &Known, unsigned Depth, const Query &Q) {
1842   if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1843     // No demanded elts or V is a scalable vector, better to assume we don't
1844     // know anything.
1845     Known.resetAll();
1846     return;
1847   }
1848 
1849   assert(V && "No Value?");
1850   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1851 
1852 #ifndef NDEBUG
1853   Type *Ty = V->getType();
1854   unsigned BitWidth = Known.getBitWidth();
1855 
1856   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1857          "Not integer or pointer type!");
1858 
1859   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1860     assert(
1861         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1862         "DemandedElt width should equal the fixed vector number of elements");
1863   } else {
1864     assert(DemandedElts == APInt(1, 1) &&
1865            "DemandedElt width should be 1 for scalars");
1866   }
1867 
1868   Type *ScalarTy = Ty->getScalarType();
1869   if (ScalarTy->isPointerTy()) {
1870     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1871            "V and Known should have same BitWidth");
1872   } else {
1873     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1874            "V and Known should have same BitWidth");
1875   }
1876 #endif
1877 
1878   const APInt *C;
1879   if (match(V, m_APInt(C))) {
1880     // We know all of the bits for a scalar constant or a splat vector constant!
1881     Known = KnownBits::makeConstant(*C);
1882     return;
1883   }
1884   // Null and aggregate-zero are all-zeros.
1885   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1886     Known.setAllZero();
1887     return;
1888   }
1889   // Handle a constant vector by taking the intersection of the known bits of
1890   // each element.
1891   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1892     // We know that CDV must be a vector of integers. Take the intersection of
1893     // each element.
1894     Known.Zero.setAllBits(); Known.One.setAllBits();
1895     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1896       if (!DemandedElts[i])
1897         continue;
1898       APInt Elt = CDV->getElementAsAPInt(i);
1899       Known.Zero &= ~Elt;
1900       Known.One &= Elt;
1901     }
1902     return;
1903   }
1904 
1905   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1906     // We know that CV must be a vector of integers. Take the intersection of
1907     // each element.
1908     Known.Zero.setAllBits(); Known.One.setAllBits();
1909     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1910       if (!DemandedElts[i])
1911         continue;
1912       Constant *Element = CV->getAggregateElement(i);
1913       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1914       if (!ElementCI) {
1915         Known.resetAll();
1916         return;
1917       }
1918       const APInt &Elt = ElementCI->getValue();
1919       Known.Zero &= ~Elt;
1920       Known.One &= Elt;
1921     }
1922     return;
1923   }
1924 
1925   // Start out not knowing anything.
1926   Known.resetAll();
1927 
1928   // We can't imply anything about undefs.
1929   if (isa<UndefValue>(V))
1930     return;
1931 
1932   // There's no point in looking through other users of ConstantData for
1933   // assumptions.  Confirm that we've handled them all.
1934   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1935 
1936   // All recursive calls that increase depth must come after this.
1937   if (Depth == MaxAnalysisRecursionDepth)
1938     return;
1939 
1940   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1941   // the bits of its aliasee.
1942   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1943     if (!GA->isInterposable())
1944       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1945     return;
1946   }
1947 
1948   if (const Operator *I = dyn_cast<Operator>(V))
1949     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1950 
1951   // Aligned pointers have trailing zeros - refine Known.Zero set
1952   if (isa<PointerType>(V->getType())) {
1953     Align Alignment = V->getPointerAlignment(Q.DL);
1954     Known.Zero.setLowBits(Log2(Alignment));
1955   }
1956 
1957   // computeKnownBitsFromAssume strictly refines Known.
1958   // Therefore, we run them after computeKnownBitsFromOperator.
1959 
1960   // Check whether a nearby assume intrinsic can determine some known bits.
1961   computeKnownBitsFromAssume(V, Known, Depth, Q);
1962 
1963   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1964 }
1965 
1966 /// Return true if the given value is known to have exactly one
1967 /// bit set when defined. For vectors return true if every element is known to
1968 /// be a power of two when defined. Supports values with integer or pointer
1969 /// types and vectors of integers.
1970 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1971                             const Query &Q) {
1972   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1973 
1974   // Attempt to match against constants.
1975   if (OrZero && match(V, m_Power2OrZero()))
1976       return true;
1977   if (match(V, m_Power2()))
1978       return true;
1979 
1980   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1981   // it is shifted off the end then the result is undefined.
1982   if (match(V, m_Shl(m_One(), m_Value())))
1983     return true;
1984 
1985   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1986   // the bottom.  If it is shifted off the bottom then the result is undefined.
1987   if (match(V, m_LShr(m_SignMask(), m_Value())))
1988     return true;
1989 
1990   // The remaining tests are all recursive, so bail out if we hit the limit.
1991   if (Depth++ == MaxAnalysisRecursionDepth)
1992     return false;
1993 
1994   Value *X = nullptr, *Y = nullptr;
1995   // A shift left or a logical shift right of a power of two is a power of two
1996   // or zero.
1997   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1998                  match(V, m_LShr(m_Value(X), m_Value()))))
1999     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2000 
2001   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2002     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2003 
2004   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2005     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2006            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2007 
2008   // Peek through min/max.
2009   if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2010     return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2011            isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2012   }
2013 
2014   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2015     // A power of two and'd with anything is a power of two or zero.
2016     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2017         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2018       return true;
2019     // X & (-X) is always a power of two or zero.
2020     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2021       return true;
2022     return false;
2023   }
2024 
2025   // Adding a power-of-two or zero to the same power-of-two or zero yields
2026   // either the original power-of-two, a larger power-of-two or zero.
2027   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2028     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2029     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2030         Q.IIQ.hasNoSignedWrap(VOBO)) {
2031       if (match(X, m_And(m_Specific(Y), m_Value())) ||
2032           match(X, m_And(m_Value(), m_Specific(Y))))
2033         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2034           return true;
2035       if (match(Y, m_And(m_Specific(X), m_Value())) ||
2036           match(Y, m_And(m_Value(), m_Specific(X))))
2037         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2038           return true;
2039 
2040       unsigned BitWidth = V->getType()->getScalarSizeInBits();
2041       KnownBits LHSBits(BitWidth);
2042       computeKnownBits(X, LHSBits, Depth, Q);
2043 
2044       KnownBits RHSBits(BitWidth);
2045       computeKnownBits(Y, RHSBits, Depth, Q);
2046       // If i8 V is a power of two or zero:
2047       //  ZeroBits: 1 1 1 0 1 1 1 1
2048       // ~ZeroBits: 0 0 0 1 0 0 0 0
2049       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2050         // If OrZero isn't set, we cannot give back a zero result.
2051         // Make sure either the LHS or RHS has a bit set.
2052         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2053           return true;
2054     }
2055   }
2056 
2057   // An exact divide or right shift can only shift off zero bits, so the result
2058   // is a power of two only if the first operand is a power of two and not
2059   // copying a sign bit (sdiv int_min, 2).
2060   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2061       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2062     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2063                                   Depth, Q);
2064   }
2065 
2066   return false;
2067 }
2068 
2069 /// Test whether a GEP's result is known to be non-null.
2070 ///
2071 /// Uses properties inherent in a GEP to try to determine whether it is known
2072 /// to be non-null.
2073 ///
2074 /// Currently this routine does not support vector GEPs.
2075 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2076                               const Query &Q) {
2077   const Function *F = nullptr;
2078   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2079     F = I->getFunction();
2080 
2081   if (!GEP->isInBounds() ||
2082       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2083     return false;
2084 
2085   // FIXME: Support vector-GEPs.
2086   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2087 
2088   // If the base pointer is non-null, we cannot walk to a null address with an
2089   // inbounds GEP in address space zero.
2090   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2091     return true;
2092 
2093   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2094   // If so, then the GEP cannot produce a null pointer, as doing so would
2095   // inherently violate the inbounds contract within address space zero.
2096   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2097        GTI != GTE; ++GTI) {
2098     // Struct types are easy -- they must always be indexed by a constant.
2099     if (StructType *STy = GTI.getStructTypeOrNull()) {
2100       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2101       unsigned ElementIdx = OpC->getZExtValue();
2102       const StructLayout *SL = Q.DL.getStructLayout(STy);
2103       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2104       if (ElementOffset > 0)
2105         return true;
2106       continue;
2107     }
2108 
2109     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2110     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2111       continue;
2112 
2113     // Fast path the constant operand case both for efficiency and so we don't
2114     // increment Depth when just zipping down an all-constant GEP.
2115     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2116       if (!OpC->isZero())
2117         return true;
2118       continue;
2119     }
2120 
2121     // We post-increment Depth here because while isKnownNonZero increments it
2122     // as well, when we pop back up that increment won't persist. We don't want
2123     // to recurse 10k times just because we have 10k GEP operands. We don't
2124     // bail completely out because we want to handle constant GEPs regardless
2125     // of depth.
2126     if (Depth++ >= MaxAnalysisRecursionDepth)
2127       continue;
2128 
2129     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2130       return true;
2131   }
2132 
2133   return false;
2134 }
2135 
2136 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2137                                                   const Instruction *CtxI,
2138                                                   const DominatorTree *DT) {
2139   if (isa<Constant>(V))
2140     return false;
2141 
2142   if (!CtxI || !DT)
2143     return false;
2144 
2145   unsigned NumUsesExplored = 0;
2146   for (auto *U : V->users()) {
2147     // Avoid massive lists
2148     if (NumUsesExplored >= DomConditionsMaxUses)
2149       break;
2150     NumUsesExplored++;
2151 
2152     // If the value is used as an argument to a call or invoke, then argument
2153     // attributes may provide an answer about null-ness.
2154     if (const auto *CB = dyn_cast<CallBase>(U))
2155       if (auto *CalledFunc = CB->getCalledFunction())
2156         for (const Argument &Arg : CalledFunc->args())
2157           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2158               Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2159               DT->dominates(CB, CtxI))
2160             return true;
2161 
2162     // If the value is used as a load/store, then the pointer must be non null.
2163     if (V == getLoadStorePointerOperand(U)) {
2164       const Instruction *I = cast<Instruction>(U);
2165       if (!NullPointerIsDefined(I->getFunction(),
2166                                 V->getType()->getPointerAddressSpace()) &&
2167           DT->dominates(I, CtxI))
2168         return true;
2169     }
2170 
2171     // Consider only compare instructions uniquely controlling a branch
2172     Value *RHS;
2173     CmpInst::Predicate Pred;
2174     if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2175       continue;
2176 
2177     bool NonNullIfTrue;
2178     if (cmpExcludesZero(Pred, RHS))
2179       NonNullIfTrue = true;
2180     else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2181       NonNullIfTrue = false;
2182     else
2183       continue;
2184 
2185     SmallVector<const User *, 4> WorkList;
2186     SmallPtrSet<const User *, 4> Visited;
2187     for (auto *CmpU : U->users()) {
2188       assert(WorkList.empty() && "Should be!");
2189       if (Visited.insert(CmpU).second)
2190         WorkList.push_back(CmpU);
2191 
2192       while (!WorkList.empty()) {
2193         auto *Curr = WorkList.pop_back_val();
2194 
2195         // If a user is an AND, add all its users to the work list. We only
2196         // propagate "pred != null" condition through AND because it is only
2197         // correct to assume that all conditions of AND are met in true branch.
2198         // TODO: Support similar logic of OR and EQ predicate?
2199         if (NonNullIfTrue)
2200           if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2201             for (auto *CurrU : Curr->users())
2202               if (Visited.insert(CurrU).second)
2203                 WorkList.push_back(CurrU);
2204             continue;
2205           }
2206 
2207         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2208           assert(BI->isConditional() && "uses a comparison!");
2209 
2210           BasicBlock *NonNullSuccessor =
2211               BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2212           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2213           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2214             return true;
2215         } else if (NonNullIfTrue && isGuard(Curr) &&
2216                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2217           return true;
2218         }
2219       }
2220     }
2221   }
2222 
2223   return false;
2224 }
2225 
2226 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2227 /// ensure that the value it's attached to is never Value?  'RangeType' is
2228 /// is the type of the value described by the range.
2229 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2230   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2231   assert(NumRanges >= 1);
2232   for (unsigned i = 0; i < NumRanges; ++i) {
2233     ConstantInt *Lower =
2234         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2235     ConstantInt *Upper =
2236         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2237     ConstantRange Range(Lower->getValue(), Upper->getValue());
2238     if (Range.contains(Value))
2239       return false;
2240   }
2241   return true;
2242 }
2243 
2244 /// Try to detect a recurrence that monotonically increases/decreases from a
2245 /// non-zero starting value. These are common as induction variables.
2246 static bool isNonZeroRecurrence(const PHINode *PN) {
2247   BinaryOperator *BO = nullptr;
2248   Value *Start = nullptr, *Step = nullptr;
2249   const APInt *StartC, *StepC;
2250   if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2251       !match(Start, m_APInt(StartC)) || StartC->isNullValue())
2252     return false;
2253 
2254   switch (BO->getOpcode()) {
2255   case Instruction::Add:
2256     // Starting from non-zero and stepping away from zero can never wrap back
2257     // to zero.
2258     return BO->hasNoUnsignedWrap() ||
2259            (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2260             StartC->isNegative() == StepC->isNegative());
2261   case Instruction::Mul:
2262     return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2263            match(Step, m_APInt(StepC)) && !StepC->isNullValue();
2264   case Instruction::Shl:
2265     return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2266   case Instruction::AShr:
2267   case Instruction::LShr:
2268     return BO->isExact();
2269   default:
2270     return false;
2271   }
2272 }
2273 
2274 /// Return true if the given value is known to be non-zero when defined. For
2275 /// vectors, return true if every demanded element is known to be non-zero when
2276 /// defined. For pointers, if the context instruction and dominator tree are
2277 /// specified, perform context-sensitive analysis and return true if the
2278 /// pointer couldn't possibly be null at the specified instruction.
2279 /// Supports values with integer or pointer type and vectors of integers.
2280 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2281                     const Query &Q) {
2282   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2283   // vector
2284   if (isa<ScalableVectorType>(V->getType()))
2285     return false;
2286 
2287   if (auto *C = dyn_cast<Constant>(V)) {
2288     if (C->isNullValue())
2289       return false;
2290     if (isa<ConstantInt>(C))
2291       // Must be non-zero due to null test above.
2292       return true;
2293 
2294     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2295       // See the comment for IntToPtr/PtrToInt instructions below.
2296       if (CE->getOpcode() == Instruction::IntToPtr ||
2297           CE->getOpcode() == Instruction::PtrToInt)
2298         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2299                 .getFixedSize() <=
2300             Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2301           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2302     }
2303 
2304     // For constant vectors, check that all elements are undefined or known
2305     // non-zero to determine that the whole vector is known non-zero.
2306     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2307       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2308         if (!DemandedElts[i])
2309           continue;
2310         Constant *Elt = C->getAggregateElement(i);
2311         if (!Elt || Elt->isNullValue())
2312           return false;
2313         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2314           return false;
2315       }
2316       return true;
2317     }
2318 
2319     // A global variable in address space 0 is non null unless extern weak
2320     // or an absolute symbol reference. Other address spaces may have null as a
2321     // valid address for a global, so we can't assume anything.
2322     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2323       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2324           GV->getType()->getAddressSpace() == 0)
2325         return true;
2326     } else
2327       return false;
2328   }
2329 
2330   if (auto *I = dyn_cast<Instruction>(V)) {
2331     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2332       // If the possible ranges don't contain zero, then the value is
2333       // definitely non-zero.
2334       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2335         const APInt ZeroValue(Ty->getBitWidth(), 0);
2336         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2337           return true;
2338       }
2339     }
2340   }
2341 
2342   if (isKnownNonZeroFromAssume(V, Q))
2343     return true;
2344 
2345   // Some of the tests below are recursive, so bail out if we hit the limit.
2346   if (Depth++ >= MaxAnalysisRecursionDepth)
2347     return false;
2348 
2349   // Check for pointer simplifications.
2350 
2351   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2352     // Alloca never returns null, malloc might.
2353     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2354       return true;
2355 
2356     // A byval, inalloca may not be null in a non-default addres space. A
2357     // nonnull argument is assumed never 0.
2358     if (const Argument *A = dyn_cast<Argument>(V)) {
2359       if (((A->hasPassPointeeByValueCopyAttr() &&
2360             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2361            A->hasNonNullAttr()))
2362         return true;
2363     }
2364 
2365     // A Load tagged with nonnull metadata is never null.
2366     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2367       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2368         return true;
2369 
2370     if (const auto *Call = dyn_cast<CallBase>(V)) {
2371       if (Call->isReturnNonNull())
2372         return true;
2373       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2374         return isKnownNonZero(RP, Depth, Q);
2375     }
2376   }
2377 
2378   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2379     return true;
2380 
2381   // Check for recursive pointer simplifications.
2382   if (V->getType()->isPointerTy()) {
2383     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2384     // do not alter the value, or at least not the nullness property of the
2385     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2386     //
2387     // Note that we have to take special care to avoid looking through
2388     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2389     // as casts that can alter the value, e.g., AddrSpaceCasts.
2390     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2391       return isGEPKnownNonNull(GEP, Depth, Q);
2392 
2393     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2394       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2395 
2396     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2397       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2398           Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2399         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2400   }
2401 
2402   // Similar to int2ptr above, we can look through ptr2int here if the cast
2403   // is a no-op or an extend and not a truncate.
2404   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2405     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2406         Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2407       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2408 
2409   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2410 
2411   // X | Y != 0 if X != 0 or Y != 0.
2412   Value *X = nullptr, *Y = nullptr;
2413   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2414     return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2415            isKnownNonZero(Y, DemandedElts, Depth, Q);
2416 
2417   // ext X != 0 if X != 0.
2418   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2419     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2420 
2421   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2422   // if the lowest bit is shifted off the end.
2423   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2424     // shl nuw can't remove any non-zero bits.
2425     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2426     if (Q.IIQ.hasNoUnsignedWrap(BO))
2427       return isKnownNonZero(X, Depth, Q);
2428 
2429     KnownBits Known(BitWidth);
2430     computeKnownBits(X, DemandedElts, Known, Depth, Q);
2431     if (Known.One[0])
2432       return true;
2433   }
2434   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2435   // defined if the sign bit is shifted off the end.
2436   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2437     // shr exact can only shift out zero bits.
2438     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2439     if (BO->isExact())
2440       return isKnownNonZero(X, Depth, Q);
2441 
2442     KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2443     if (Known.isNegative())
2444       return true;
2445 
2446     // If the shifter operand is a constant, and all of the bits shifted
2447     // out are known to be zero, and X is known non-zero then at least one
2448     // non-zero bit must remain.
2449     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2450       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2451       // Is there a known one in the portion not shifted out?
2452       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2453         return true;
2454       // Are all the bits to be shifted out known zero?
2455       if (Known.countMinTrailingZeros() >= ShiftVal)
2456         return isKnownNonZero(X, DemandedElts, Depth, Q);
2457     }
2458   }
2459   // div exact can only produce a zero if the dividend is zero.
2460   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2461     return isKnownNonZero(X, DemandedElts, Depth, Q);
2462   }
2463   // X + Y.
2464   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2465     KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2466     KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2467 
2468     // If X and Y are both non-negative (as signed values) then their sum is not
2469     // zero unless both X and Y are zero.
2470     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2471       if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2472           isKnownNonZero(Y, DemandedElts, Depth, Q))
2473         return true;
2474 
2475     // If X and Y are both negative (as signed values) then their sum is not
2476     // zero unless both X and Y equal INT_MIN.
2477     if (XKnown.isNegative() && YKnown.isNegative()) {
2478       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2479       // The sign bit of X is set.  If some other bit is set then X is not equal
2480       // to INT_MIN.
2481       if (XKnown.One.intersects(Mask))
2482         return true;
2483       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2484       // to INT_MIN.
2485       if (YKnown.One.intersects(Mask))
2486         return true;
2487     }
2488 
2489     // The sum of a non-negative number and a power of two is not zero.
2490     if (XKnown.isNonNegative() &&
2491         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2492       return true;
2493     if (YKnown.isNonNegative() &&
2494         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2495       return true;
2496   }
2497   // X * Y.
2498   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2499     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2500     // If X and Y are non-zero then so is X * Y as long as the multiplication
2501     // does not overflow.
2502     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2503         isKnownNonZero(X, DemandedElts, Depth, Q) &&
2504         isKnownNonZero(Y, DemandedElts, Depth, Q))
2505       return true;
2506   }
2507   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2508   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2509     if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2510         isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2511       return true;
2512   }
2513   // PHI
2514   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2515     if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2516       return true;
2517 
2518     // Check if all incoming values are non-zero using recursion.
2519     Query RecQ = Q;
2520     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2521     return llvm::all_of(PN->operands(), [&](const Use &U) {
2522       if (U.get() == PN)
2523         return true;
2524       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2525       return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2526     });
2527   }
2528   // ExtractElement
2529   else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2530     const Value *Vec = EEI->getVectorOperand();
2531     const Value *Idx = EEI->getIndexOperand();
2532     auto *CIdx = dyn_cast<ConstantInt>(Idx);
2533     if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2534       unsigned NumElts = VecTy->getNumElements();
2535       APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2536       if (CIdx && CIdx->getValue().ult(NumElts))
2537         DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2538       return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2539     }
2540   }
2541   // Freeze
2542   else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2543     auto *Op = FI->getOperand(0);
2544     if (isKnownNonZero(Op, Depth, Q) &&
2545         isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2546       return true;
2547   }
2548 
2549   KnownBits Known(BitWidth);
2550   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2551   return Known.One != 0;
2552 }
2553 
2554 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2555   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2556   // vector
2557   if (isa<ScalableVectorType>(V->getType()))
2558     return false;
2559 
2560   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2561   APInt DemandedElts =
2562       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2563   return isKnownNonZero(V, DemandedElts, Depth, Q);
2564 }
2565 
2566 /// If the pair of operators are the same invertible function, return the
2567 /// the operands of the function corresponding to each input. Otherwise,
2568 /// return None.  An invertible function is one that is 1-to-1 and maps
2569 /// every input value to exactly one output value.  This is equivalent to
2570 /// saying that Op1 and Op2 are equal exactly when the specified pair of
2571 /// operands are equal, (except that Op1 and Op2 may be poison more often.)
2572 static Optional<std::pair<Value*, Value*>>
2573 getInvertibleOperands(const Operator *Op1,
2574                       const Operator *Op2) {
2575   if (Op1->getOpcode() != Op2->getOpcode())
2576     return None;
2577 
2578   auto getOperands = [&](unsigned OpNum) -> auto {
2579     return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2580   };
2581 
2582   switch (Op1->getOpcode()) {
2583   default:
2584     break;
2585   case Instruction::Add:
2586   case Instruction::Sub:
2587     if (Op1->getOperand(0) == Op2->getOperand(0))
2588       return getOperands(1);
2589     if (Op1->getOperand(1) == Op2->getOperand(1))
2590       return getOperands(0);
2591     break;
2592   case Instruction::Mul: {
2593     // invertible if A * B == (A * B) mod 2^N where A, and B are integers
2594     // and N is the bitwdith.  The nsw case is non-obvious, but proven by
2595     // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2596     auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2597     auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2598     if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2599         (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2600       break;
2601 
2602     // Assume operand order has been canonicalized
2603     if (Op1->getOperand(1) == Op2->getOperand(1) &&
2604         isa<ConstantInt>(Op1->getOperand(1)) &&
2605         !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2606       return getOperands(0);
2607     break;
2608   }
2609   case Instruction::Shl: {
2610     // Same as multiplies, with the difference that we don't need to check
2611     // for a non-zero multiply. Shifts always multiply by non-zero.
2612     auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2613     auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2614     if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2615         (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2616       break;
2617 
2618     if (Op1->getOperand(1) == Op2->getOperand(1))
2619       return getOperands(0);
2620     break;
2621   }
2622   case Instruction::AShr:
2623   case Instruction::LShr: {
2624     auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2625     auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2626     if (!PEO1->isExact() || !PEO2->isExact())
2627       break;
2628 
2629     if (Op1->getOperand(1) == Op2->getOperand(1))
2630       return getOperands(0);
2631     break;
2632   }
2633   case Instruction::SExt:
2634   case Instruction::ZExt:
2635     if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2636       return getOperands(0);
2637     break;
2638   case Instruction::PHI: {
2639     const PHINode *PN1 = cast<PHINode>(Op1);
2640     const PHINode *PN2 = cast<PHINode>(Op2);
2641 
2642     // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2643     // are a single invertible function of the start values? Note that repeated
2644     // application of an invertible function is also invertible
2645     BinaryOperator *BO1 = nullptr;
2646     Value *Start1 = nullptr, *Step1 = nullptr;
2647     BinaryOperator *BO2 = nullptr;
2648     Value *Start2 = nullptr, *Step2 = nullptr;
2649     if (PN1->getParent() != PN2->getParent() ||
2650         !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2651         !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2652       break;
2653 
2654     auto Values = getInvertibleOperands(cast<Operator>(BO1),
2655                                         cast<Operator>(BO2));
2656     if (!Values)
2657        break;
2658 
2659     // We have to be careful of mutually defined recurrences here.  Ex:
2660     // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2661     // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2662     // The invertibility of these is complicated, and not worth reasoning
2663     // about (yet?).
2664     if (Values->first != PN1 || Values->second != PN2)
2665       break;
2666 
2667     return std::make_pair(Start1, Start2);
2668   }
2669   }
2670   return None;
2671 }
2672 
2673 /// Return true if V2 == V1 + X, where X is known non-zero.
2674 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2675                            const Query &Q) {
2676   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2677   if (!BO || BO->getOpcode() != Instruction::Add)
2678     return false;
2679   Value *Op = nullptr;
2680   if (V2 == BO->getOperand(0))
2681     Op = BO->getOperand(1);
2682   else if (V2 == BO->getOperand(1))
2683     Op = BO->getOperand(0);
2684   else
2685     return false;
2686   return isKnownNonZero(Op, Depth + 1, Q);
2687 }
2688 
2689 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2690 /// the multiplication is nuw or nsw.
2691 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2692                           const Query &Q) {
2693   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2694     const APInt *C;
2695     return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2696            (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2697            !C->isNullValue() && !C->isOneValue() &&
2698            isKnownNonZero(V1, Depth + 1, Q);
2699   }
2700   return false;
2701 }
2702 
2703 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2704 /// the shift is nuw or nsw.
2705 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2706                           const Query &Q) {
2707   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2708     const APInt *C;
2709     return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2710            (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2711            !C->isNullValue() && isKnownNonZero(V1, Depth + 1, Q);
2712   }
2713   return false;
2714 }
2715 
2716 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2717                            unsigned Depth, const Query &Q) {
2718   // Check two PHIs are in same block.
2719   if (PN1->getParent() != PN2->getParent())
2720     return false;
2721 
2722   SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2723   bool UsedFullRecursion = false;
2724   for (const BasicBlock *IncomBB : PN1->blocks()) {
2725     if (!VisitedBBs.insert(IncomBB).second)
2726       continue; // Don't reprocess blocks that we have dealt with already.
2727     const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2728     const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2729     const APInt *C1, *C2;
2730     if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2731       continue;
2732 
2733     // Only one pair of phi operands is allowed for full recursion.
2734     if (UsedFullRecursion)
2735       return false;
2736 
2737     Query RecQ = Q;
2738     RecQ.CxtI = IncomBB->getTerminator();
2739     if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2740       return false;
2741     UsedFullRecursion = true;
2742   }
2743   return true;
2744 }
2745 
2746 /// Return true if it is known that V1 != V2.
2747 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2748                             const Query &Q) {
2749   if (V1 == V2)
2750     return false;
2751   if (V1->getType() != V2->getType())
2752     // We can't look through casts yet.
2753     return false;
2754 
2755   if (Depth >= MaxAnalysisRecursionDepth)
2756     return false;
2757 
2758   // See if we can recurse through (exactly one of) our operands.  This
2759   // requires our operation be 1-to-1 and map every input value to exactly
2760   // one output value.  Such an operation is invertible.
2761   auto *O1 = dyn_cast<Operator>(V1);
2762   auto *O2 = dyn_cast<Operator>(V2);
2763   if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2764     if (auto Values = getInvertibleOperands(O1, O2))
2765       return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2766 
2767     if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2768       const PHINode *PN2 = cast<PHINode>(V2);
2769       // FIXME: This is missing a generalization to handle the case where one is
2770       // a PHI and another one isn't.
2771       if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2772         return true;
2773     };
2774   }
2775 
2776   if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2777     return true;
2778 
2779   if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2780     return true;
2781 
2782   if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2783     return true;
2784 
2785   if (V1->getType()->isIntOrIntVectorTy()) {
2786     // Are any known bits in V1 contradictory to known bits in V2? If V1
2787     // has a known zero where V2 has a known one, they must not be equal.
2788     KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2789     KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2790 
2791     if (Known1.Zero.intersects(Known2.One) ||
2792         Known2.Zero.intersects(Known1.One))
2793       return true;
2794   }
2795   return false;
2796 }
2797 
2798 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2799 /// simplify operations downstream. Mask is known to be zero for bits that V
2800 /// cannot have.
2801 ///
2802 /// This function is defined on values with integer type, values with pointer
2803 /// type, and vectors of integers.  In the case
2804 /// where V is a vector, the mask, known zero, and known one values are the
2805 /// same width as the vector element, and the bit is set only if it is true
2806 /// for all of the elements in the vector.
2807 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2808                        const Query &Q) {
2809   KnownBits Known(Mask.getBitWidth());
2810   computeKnownBits(V, Known, Depth, Q);
2811   return Mask.isSubsetOf(Known.Zero);
2812 }
2813 
2814 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2815 // Returns the input and lower/upper bounds.
2816 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2817                                 const APInt *&CLow, const APInt *&CHigh) {
2818   assert(isa<Operator>(Select) &&
2819          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2820          "Input should be a Select!");
2821 
2822   const Value *LHS = nullptr, *RHS = nullptr;
2823   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2824   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2825     return false;
2826 
2827   if (!match(RHS, m_APInt(CLow)))
2828     return false;
2829 
2830   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2831   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2832   if (getInverseMinMaxFlavor(SPF) != SPF2)
2833     return false;
2834 
2835   if (!match(RHS2, m_APInt(CHigh)))
2836     return false;
2837 
2838   if (SPF == SPF_SMIN)
2839     std::swap(CLow, CHigh);
2840 
2841   In = LHS2;
2842   return CLow->sle(*CHigh);
2843 }
2844 
2845 /// For vector constants, loop over the elements and find the constant with the
2846 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2847 /// or if any element was not analyzed; otherwise, return the count for the
2848 /// element with the minimum number of sign bits.
2849 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2850                                                  const APInt &DemandedElts,
2851                                                  unsigned TyBits) {
2852   const auto *CV = dyn_cast<Constant>(V);
2853   if (!CV || !isa<FixedVectorType>(CV->getType()))
2854     return 0;
2855 
2856   unsigned MinSignBits = TyBits;
2857   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2858   for (unsigned i = 0; i != NumElts; ++i) {
2859     if (!DemandedElts[i])
2860       continue;
2861     // If we find a non-ConstantInt, bail out.
2862     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2863     if (!Elt)
2864       return 0;
2865 
2866     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2867   }
2868 
2869   return MinSignBits;
2870 }
2871 
2872 static unsigned ComputeNumSignBitsImpl(const Value *V,
2873                                        const APInt &DemandedElts,
2874                                        unsigned Depth, const Query &Q);
2875 
2876 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2877                                    unsigned Depth, const Query &Q) {
2878   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2879   assert(Result > 0 && "At least one sign bit needs to be present!");
2880   return Result;
2881 }
2882 
2883 /// Return the number of times the sign bit of the register is replicated into
2884 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2885 /// (itself), but other cases can give us information. For example, immediately
2886 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2887 /// other, so we return 3. For vectors, return the number of sign bits for the
2888 /// vector element with the minimum number of known sign bits of the demanded
2889 /// elements in the vector specified by DemandedElts.
2890 static unsigned ComputeNumSignBitsImpl(const Value *V,
2891                                        const APInt &DemandedElts,
2892                                        unsigned Depth, const Query &Q) {
2893   Type *Ty = V->getType();
2894 
2895   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2896   // vector
2897   if (isa<ScalableVectorType>(Ty))
2898     return 1;
2899 
2900 #ifndef NDEBUG
2901   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2902 
2903   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2904     assert(
2905         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2906         "DemandedElt width should equal the fixed vector number of elements");
2907   } else {
2908     assert(DemandedElts == APInt(1, 1) &&
2909            "DemandedElt width should be 1 for scalars");
2910   }
2911 #endif
2912 
2913   // We return the minimum number of sign bits that are guaranteed to be present
2914   // in V, so for undef we have to conservatively return 1.  We don't have the
2915   // same behavior for poison though -- that's a FIXME today.
2916 
2917   Type *ScalarTy = Ty->getScalarType();
2918   unsigned TyBits = ScalarTy->isPointerTy() ?
2919     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2920     Q.DL.getTypeSizeInBits(ScalarTy);
2921 
2922   unsigned Tmp, Tmp2;
2923   unsigned FirstAnswer = 1;
2924 
2925   // Note that ConstantInt is handled by the general computeKnownBits case
2926   // below.
2927 
2928   if (Depth == MaxAnalysisRecursionDepth)
2929     return 1;
2930 
2931   if (auto *U = dyn_cast<Operator>(V)) {
2932     switch (Operator::getOpcode(V)) {
2933     default: break;
2934     case Instruction::SExt:
2935       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2936       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2937 
2938     case Instruction::SDiv: {
2939       const APInt *Denominator;
2940       // sdiv X, C -> adds log(C) sign bits.
2941       if (match(U->getOperand(1), m_APInt(Denominator))) {
2942 
2943         // Ignore non-positive denominator.
2944         if (!Denominator->isStrictlyPositive())
2945           break;
2946 
2947         // Calculate the incoming numerator bits.
2948         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2949 
2950         // Add floor(log(C)) bits to the numerator bits.
2951         return std::min(TyBits, NumBits + Denominator->logBase2());
2952       }
2953       break;
2954     }
2955 
2956     case Instruction::SRem: {
2957       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2958 
2959       const APInt *Denominator;
2960       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2961       // positive constant.  This let us put a lower bound on the number of sign
2962       // bits.
2963       if (match(U->getOperand(1), m_APInt(Denominator))) {
2964 
2965         // Ignore non-positive denominator.
2966         if (Denominator->isStrictlyPositive()) {
2967           // Calculate the leading sign bit constraints by examining the
2968           // denominator.  Given that the denominator is positive, there are two
2969           // cases:
2970           //
2971           //  1. The numerator is positive. The result range is [0,C) and
2972           //     [0,C) u< (1 << ceilLogBase2(C)).
2973           //
2974           //  2. The numerator is negative. Then the result range is (-C,0] and
2975           //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2976           //
2977           // Thus a lower bound on the number of sign bits is `TyBits -
2978           // ceilLogBase2(C)`.
2979 
2980           unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2981           Tmp = std::max(Tmp, ResBits);
2982         }
2983       }
2984       return Tmp;
2985     }
2986 
2987     case Instruction::AShr: {
2988       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2989       // ashr X, C   -> adds C sign bits.  Vectors too.
2990       const APInt *ShAmt;
2991       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2992         if (ShAmt->uge(TyBits))
2993           break; // Bad shift.
2994         unsigned ShAmtLimited = ShAmt->getZExtValue();
2995         Tmp += ShAmtLimited;
2996         if (Tmp > TyBits) Tmp = TyBits;
2997       }
2998       return Tmp;
2999     }
3000     case Instruction::Shl: {
3001       const APInt *ShAmt;
3002       if (match(U->getOperand(1), m_APInt(ShAmt))) {
3003         // shl destroys sign bits.
3004         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3005         if (ShAmt->uge(TyBits) ||   // Bad shift.
3006             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3007         Tmp2 = ShAmt->getZExtValue();
3008         return Tmp - Tmp2;
3009       }
3010       break;
3011     }
3012     case Instruction::And:
3013     case Instruction::Or:
3014     case Instruction::Xor: // NOT is handled here.
3015       // Logical binary ops preserve the number of sign bits at the worst.
3016       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3017       if (Tmp != 1) {
3018         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3019         FirstAnswer = std::min(Tmp, Tmp2);
3020         // We computed what we know about the sign bits as our first
3021         // answer. Now proceed to the generic code that uses
3022         // computeKnownBits, and pick whichever answer is better.
3023       }
3024       break;
3025 
3026     case Instruction::Select: {
3027       // If we have a clamp pattern, we know that the number of sign bits will
3028       // be the minimum of the clamp min/max range.
3029       const Value *X;
3030       const APInt *CLow, *CHigh;
3031       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3032         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3033 
3034       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3035       if (Tmp == 1) break;
3036       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3037       return std::min(Tmp, Tmp2);
3038     }
3039 
3040     case Instruction::Add:
3041       // Add can have at most one carry bit.  Thus we know that the output
3042       // is, at worst, one more bit than the inputs.
3043       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3044       if (Tmp == 1) break;
3045 
3046       // Special case decrementing a value (ADD X, -1):
3047       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3048         if (CRHS->isAllOnesValue()) {
3049           KnownBits Known(TyBits);
3050           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3051 
3052           // If the input is known to be 0 or 1, the output is 0/-1, which is
3053           // all sign bits set.
3054           if ((Known.Zero | 1).isAllOnesValue())
3055             return TyBits;
3056 
3057           // If we are subtracting one from a positive number, there is no carry
3058           // out of the result.
3059           if (Known.isNonNegative())
3060             return Tmp;
3061         }
3062 
3063       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3064       if (Tmp2 == 1) break;
3065       return std::min(Tmp, Tmp2) - 1;
3066 
3067     case Instruction::Sub:
3068       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3069       if (Tmp2 == 1) break;
3070 
3071       // Handle NEG.
3072       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3073         if (CLHS->isNullValue()) {
3074           KnownBits Known(TyBits);
3075           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3076           // If the input is known to be 0 or 1, the output is 0/-1, which is
3077           // all sign bits set.
3078           if ((Known.Zero | 1).isAllOnesValue())
3079             return TyBits;
3080 
3081           // If the input is known to be positive (the sign bit is known clear),
3082           // the output of the NEG has the same number of sign bits as the
3083           // input.
3084           if (Known.isNonNegative())
3085             return Tmp2;
3086 
3087           // Otherwise, we treat this like a SUB.
3088         }
3089 
3090       // Sub can have at most one carry bit.  Thus we know that the output
3091       // is, at worst, one more bit than the inputs.
3092       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3093       if (Tmp == 1) break;
3094       return std::min(Tmp, Tmp2) - 1;
3095 
3096     case Instruction::Mul: {
3097       // The output of the Mul can be at most twice the valid bits in the
3098       // inputs.
3099       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3100       if (SignBitsOp0 == 1) break;
3101       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3102       if (SignBitsOp1 == 1) break;
3103       unsigned OutValidBits =
3104           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3105       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3106     }
3107 
3108     case Instruction::PHI: {
3109       const PHINode *PN = cast<PHINode>(U);
3110       unsigned NumIncomingValues = PN->getNumIncomingValues();
3111       // Don't analyze large in-degree PHIs.
3112       if (NumIncomingValues > 4) break;
3113       // Unreachable blocks may have zero-operand PHI nodes.
3114       if (NumIncomingValues == 0) break;
3115 
3116       // Take the minimum of all incoming values.  This can't infinitely loop
3117       // because of our depth threshold.
3118       Query RecQ = Q;
3119       Tmp = TyBits;
3120       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3121         if (Tmp == 1) return Tmp;
3122         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3123         Tmp = std::min(
3124             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3125       }
3126       return Tmp;
3127     }
3128 
3129     case Instruction::Trunc:
3130       // FIXME: it's tricky to do anything useful for this, but it is an
3131       // important case for targets like X86.
3132       break;
3133 
3134     case Instruction::ExtractElement:
3135       // Look through extract element. At the moment we keep this simple and
3136       // skip tracking the specific element. But at least we might find
3137       // information valid for all elements of the vector (for example if vector
3138       // is sign extended, shifted, etc).
3139       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3140 
3141     case Instruction::ShuffleVector: {
3142       // Collect the minimum number of sign bits that are shared by every vector
3143       // element referenced by the shuffle.
3144       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3145       if (!Shuf) {
3146         // FIXME: Add support for shufflevector constant expressions.
3147         return 1;
3148       }
3149       APInt DemandedLHS, DemandedRHS;
3150       // For undef elements, we don't know anything about the common state of
3151       // the shuffle result.
3152       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3153         return 1;
3154       Tmp = std::numeric_limits<unsigned>::max();
3155       if (!!DemandedLHS) {
3156         const Value *LHS = Shuf->getOperand(0);
3157         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3158       }
3159       // If we don't know anything, early out and try computeKnownBits
3160       // fall-back.
3161       if (Tmp == 1)
3162         break;
3163       if (!!DemandedRHS) {
3164         const Value *RHS = Shuf->getOperand(1);
3165         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3166         Tmp = std::min(Tmp, Tmp2);
3167       }
3168       // If we don't know anything, early out and try computeKnownBits
3169       // fall-back.
3170       if (Tmp == 1)
3171         break;
3172       assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3173       return Tmp;
3174     }
3175     case Instruction::Call: {
3176       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3177         switch (II->getIntrinsicID()) {
3178         default: break;
3179         case Intrinsic::abs:
3180           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3181           if (Tmp == 1) break;
3182 
3183           // Absolute value reduces number of sign bits by at most 1.
3184           return Tmp - 1;
3185         }
3186       }
3187     }
3188     }
3189   }
3190 
3191   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3192   // use this information.
3193 
3194   // If we can examine all elements of a vector constant successfully, we're
3195   // done (we can't do any better than that). If not, keep trying.
3196   if (unsigned VecSignBits =
3197           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3198     return VecSignBits;
3199 
3200   KnownBits Known(TyBits);
3201   computeKnownBits(V, DemandedElts, Known, Depth, Q);
3202 
3203   // If we know that the sign bit is either zero or one, determine the number of
3204   // identical bits in the top of the input value.
3205   return std::max(FirstAnswer, Known.countMinSignBits());
3206 }
3207 
3208 /// This function computes the integer multiple of Base that equals V.
3209 /// If successful, it returns true and returns the multiple in
3210 /// Multiple. If unsuccessful, it returns false. It looks
3211 /// through SExt instructions only if LookThroughSExt is true.
3212 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3213                            bool LookThroughSExt, unsigned Depth) {
3214   assert(V && "No Value?");
3215   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3216   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
3217 
3218   Type *T = V->getType();
3219 
3220   ConstantInt *CI = dyn_cast<ConstantInt>(V);
3221 
3222   if (Base == 0)
3223     return false;
3224 
3225   if (Base == 1) {
3226     Multiple = V;
3227     return true;
3228   }
3229 
3230   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3231   Constant *BaseVal = ConstantInt::get(T, Base);
3232   if (CO && CO == BaseVal) {
3233     // Multiple is 1.
3234     Multiple = ConstantInt::get(T, 1);
3235     return true;
3236   }
3237 
3238   if (CI && CI->getZExtValue() % Base == 0) {
3239     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3240     return true;
3241   }
3242 
3243   if (Depth == MaxAnalysisRecursionDepth) return false;
3244 
3245   Operator *I = dyn_cast<Operator>(V);
3246   if (!I) return false;
3247 
3248   switch (I->getOpcode()) {
3249   default: break;
3250   case Instruction::SExt:
3251     if (!LookThroughSExt) return false;
3252     // otherwise fall through to ZExt
3253     LLVM_FALLTHROUGH;
3254   case Instruction::ZExt:
3255     return ComputeMultiple(I->getOperand(0), Base, Multiple,
3256                            LookThroughSExt, Depth+1);
3257   case Instruction::Shl:
3258   case Instruction::Mul: {
3259     Value *Op0 = I->getOperand(0);
3260     Value *Op1 = I->getOperand(1);
3261 
3262     if (I->getOpcode() == Instruction::Shl) {
3263       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3264       if (!Op1CI) return false;
3265       // Turn Op0 << Op1 into Op0 * 2^Op1
3266       APInt Op1Int = Op1CI->getValue();
3267       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3268       APInt API(Op1Int.getBitWidth(), 0);
3269       API.setBit(BitToSet);
3270       Op1 = ConstantInt::get(V->getContext(), API);
3271     }
3272 
3273     Value *Mul0 = nullptr;
3274     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3275       if (Constant *Op1C = dyn_cast<Constant>(Op1))
3276         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3277           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3278               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3279             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3280           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3281               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3282             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3283 
3284           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3285           Multiple = ConstantExpr::getMul(MulC, Op1C);
3286           return true;
3287         }
3288 
3289       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3290         if (Mul0CI->getValue() == 1) {
3291           // V == Base * Op1, so return Op1
3292           Multiple = Op1;
3293           return true;
3294         }
3295     }
3296 
3297     Value *Mul1 = nullptr;
3298     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3299       if (Constant *Op0C = dyn_cast<Constant>(Op0))
3300         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3301           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3302               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3303             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3304           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3305               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3306             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3307 
3308           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3309           Multiple = ConstantExpr::getMul(MulC, Op0C);
3310           return true;
3311         }
3312 
3313       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3314         if (Mul1CI->getValue() == 1) {
3315           // V == Base * Op0, so return Op0
3316           Multiple = Op0;
3317           return true;
3318         }
3319     }
3320   }
3321   }
3322 
3323   // We could not determine if V is a multiple of Base.
3324   return false;
3325 }
3326 
3327 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3328                                             const TargetLibraryInfo *TLI) {
3329   const Function *F = CB.getCalledFunction();
3330   if (!F)
3331     return Intrinsic::not_intrinsic;
3332 
3333   if (F->isIntrinsic())
3334     return F->getIntrinsicID();
3335 
3336   // We are going to infer semantics of a library function based on mapping it
3337   // to an LLVM intrinsic. Check that the library function is available from
3338   // this callbase and in this environment.
3339   LibFunc Func;
3340   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3341       !CB.onlyReadsMemory())
3342     return Intrinsic::not_intrinsic;
3343 
3344   switch (Func) {
3345   default:
3346     break;
3347   case LibFunc_sin:
3348   case LibFunc_sinf:
3349   case LibFunc_sinl:
3350     return Intrinsic::sin;
3351   case LibFunc_cos:
3352   case LibFunc_cosf:
3353   case LibFunc_cosl:
3354     return Intrinsic::cos;
3355   case LibFunc_exp:
3356   case LibFunc_expf:
3357   case LibFunc_expl:
3358     return Intrinsic::exp;
3359   case LibFunc_exp2:
3360   case LibFunc_exp2f:
3361   case LibFunc_exp2l:
3362     return Intrinsic::exp2;
3363   case LibFunc_log:
3364   case LibFunc_logf:
3365   case LibFunc_logl:
3366     return Intrinsic::log;
3367   case LibFunc_log10:
3368   case LibFunc_log10f:
3369   case LibFunc_log10l:
3370     return Intrinsic::log10;
3371   case LibFunc_log2:
3372   case LibFunc_log2f:
3373   case LibFunc_log2l:
3374     return Intrinsic::log2;
3375   case LibFunc_fabs:
3376   case LibFunc_fabsf:
3377   case LibFunc_fabsl:
3378     return Intrinsic::fabs;
3379   case LibFunc_fmin:
3380   case LibFunc_fminf:
3381   case LibFunc_fminl:
3382     return Intrinsic::minnum;
3383   case LibFunc_fmax:
3384   case LibFunc_fmaxf:
3385   case LibFunc_fmaxl:
3386     return Intrinsic::maxnum;
3387   case LibFunc_copysign:
3388   case LibFunc_copysignf:
3389   case LibFunc_copysignl:
3390     return Intrinsic::copysign;
3391   case LibFunc_floor:
3392   case LibFunc_floorf:
3393   case LibFunc_floorl:
3394     return Intrinsic::floor;
3395   case LibFunc_ceil:
3396   case LibFunc_ceilf:
3397   case LibFunc_ceill:
3398     return Intrinsic::ceil;
3399   case LibFunc_trunc:
3400   case LibFunc_truncf:
3401   case LibFunc_truncl:
3402     return Intrinsic::trunc;
3403   case LibFunc_rint:
3404   case LibFunc_rintf:
3405   case LibFunc_rintl:
3406     return Intrinsic::rint;
3407   case LibFunc_nearbyint:
3408   case LibFunc_nearbyintf:
3409   case LibFunc_nearbyintl:
3410     return Intrinsic::nearbyint;
3411   case LibFunc_round:
3412   case LibFunc_roundf:
3413   case LibFunc_roundl:
3414     return Intrinsic::round;
3415   case LibFunc_roundeven:
3416   case LibFunc_roundevenf:
3417   case LibFunc_roundevenl:
3418     return Intrinsic::roundeven;
3419   case LibFunc_pow:
3420   case LibFunc_powf:
3421   case LibFunc_powl:
3422     return Intrinsic::pow;
3423   case LibFunc_sqrt:
3424   case LibFunc_sqrtf:
3425   case LibFunc_sqrtl:
3426     return Intrinsic::sqrt;
3427   }
3428 
3429   return Intrinsic::not_intrinsic;
3430 }
3431 
3432 /// Return true if we can prove that the specified FP value is never equal to
3433 /// -0.0.
3434 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3435 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3436 ///       the same as +0.0 in floating-point ops.
3437 ///
3438 /// NOTE: this function will need to be revisited when we support non-default
3439 /// rounding modes!
3440 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3441                                 unsigned Depth) {
3442   if (auto *CFP = dyn_cast<ConstantFP>(V))
3443     return !CFP->getValueAPF().isNegZero();
3444 
3445   if (Depth == MaxAnalysisRecursionDepth)
3446     return false;
3447 
3448   auto *Op = dyn_cast<Operator>(V);
3449   if (!Op)
3450     return false;
3451 
3452   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3453   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3454     return true;
3455 
3456   // sitofp and uitofp turn into +0.0 for zero.
3457   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3458     return true;
3459 
3460   if (auto *Call = dyn_cast<CallInst>(Op)) {
3461     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3462     switch (IID) {
3463     default:
3464       break;
3465     // sqrt(-0.0) = -0.0, no other negative results are possible.
3466     case Intrinsic::sqrt:
3467     case Intrinsic::canonicalize:
3468       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3469     // fabs(x) != -0.0
3470     case Intrinsic::fabs:
3471       return true;
3472     }
3473   }
3474 
3475   return false;
3476 }
3477 
3478 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3479 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3480 /// bit despite comparing equal.
3481 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3482                                             const TargetLibraryInfo *TLI,
3483                                             bool SignBitOnly,
3484                                             unsigned Depth) {
3485   // TODO: This function does not do the right thing when SignBitOnly is true
3486   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3487   // which flips the sign bits of NaNs.  See
3488   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3489 
3490   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3491     return !CFP->getValueAPF().isNegative() ||
3492            (!SignBitOnly && CFP->getValueAPF().isZero());
3493   }
3494 
3495   // Handle vector of constants.
3496   if (auto *CV = dyn_cast<Constant>(V)) {
3497     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3498       unsigned NumElts = CVFVTy->getNumElements();
3499       for (unsigned i = 0; i != NumElts; ++i) {
3500         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3501         if (!CFP)
3502           return false;
3503         if (CFP->getValueAPF().isNegative() &&
3504             (SignBitOnly || !CFP->getValueAPF().isZero()))
3505           return false;
3506       }
3507 
3508       // All non-negative ConstantFPs.
3509       return true;
3510     }
3511   }
3512 
3513   if (Depth == MaxAnalysisRecursionDepth)
3514     return false;
3515 
3516   const Operator *I = dyn_cast<Operator>(V);
3517   if (!I)
3518     return false;
3519 
3520   switch (I->getOpcode()) {
3521   default:
3522     break;
3523   // Unsigned integers are always nonnegative.
3524   case Instruction::UIToFP:
3525     return true;
3526   case Instruction::FMul:
3527   case Instruction::FDiv:
3528     // X * X is always non-negative or a NaN.
3529     // X / X is always exactly 1.0 or a NaN.
3530     if (I->getOperand(0) == I->getOperand(1) &&
3531         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3532       return true;
3533 
3534     LLVM_FALLTHROUGH;
3535   case Instruction::FAdd:
3536   case Instruction::FRem:
3537     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3538                                            Depth + 1) &&
3539            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3540                                            Depth + 1);
3541   case Instruction::Select:
3542     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3543                                            Depth + 1) &&
3544            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3545                                            Depth + 1);
3546   case Instruction::FPExt:
3547   case Instruction::FPTrunc:
3548     // Widening/narrowing never change sign.
3549     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3550                                            Depth + 1);
3551   case Instruction::ExtractElement:
3552     // Look through extract element. At the moment we keep this simple and skip
3553     // tracking the specific element. But at least we might find information
3554     // valid for all elements of the vector.
3555     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3556                                            Depth + 1);
3557   case Instruction::Call:
3558     const auto *CI = cast<CallInst>(I);
3559     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3560     switch (IID) {
3561     default:
3562       break;
3563     case Intrinsic::maxnum: {
3564       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3565       auto isPositiveNum = [&](Value *V) {
3566         if (SignBitOnly) {
3567           // With SignBitOnly, this is tricky because the result of
3568           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3569           // a constant strictly greater than 0.0.
3570           const APFloat *C;
3571           return match(V, m_APFloat(C)) &&
3572                  *C > APFloat::getZero(C->getSemantics());
3573         }
3574 
3575         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3576         // maxnum can't be ordered-less-than-zero.
3577         return isKnownNeverNaN(V, TLI) &&
3578                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3579       };
3580 
3581       // TODO: This could be improved. We could also check that neither operand
3582       //       has its sign bit set (and at least 1 is not-NAN?).
3583       return isPositiveNum(V0) || isPositiveNum(V1);
3584     }
3585 
3586     case Intrinsic::maximum:
3587       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3588                                              Depth + 1) ||
3589              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3590                                              Depth + 1);
3591     case Intrinsic::minnum:
3592     case Intrinsic::minimum:
3593       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3594                                              Depth + 1) &&
3595              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3596                                              Depth + 1);
3597     case Intrinsic::exp:
3598     case Intrinsic::exp2:
3599     case Intrinsic::fabs:
3600       return true;
3601 
3602     case Intrinsic::sqrt:
3603       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3604       if (!SignBitOnly)
3605         return true;
3606       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3607                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3608 
3609     case Intrinsic::powi:
3610       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3611         // powi(x,n) is non-negative if n is even.
3612         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3613           return true;
3614       }
3615       // TODO: This is not correct.  Given that exp is an integer, here are the
3616       // ways that pow can return a negative value:
3617       //
3618       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3619       //   pow(-0, exp)   --> -inf if exp is negative odd.
3620       //   pow(-0, exp)   --> -0 if exp is positive odd.
3621       //   pow(-inf, exp) --> -0 if exp is negative odd.
3622       //   pow(-inf, exp) --> -inf if exp is positive odd.
3623       //
3624       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3625       // but we must return false if x == -0.  Unfortunately we do not currently
3626       // have a way of expressing this constraint.  See details in
3627       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3628       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3629                                              Depth + 1);
3630 
3631     case Intrinsic::fma:
3632     case Intrinsic::fmuladd:
3633       // x*x+y is non-negative if y is non-negative.
3634       return I->getOperand(0) == I->getOperand(1) &&
3635              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3636              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3637                                              Depth + 1);
3638     }
3639     break;
3640   }
3641   return false;
3642 }
3643 
3644 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3645                                        const TargetLibraryInfo *TLI) {
3646   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3647 }
3648 
3649 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3650   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3651 }
3652 
3653 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3654                                 unsigned Depth) {
3655   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3656 
3657   // If we're told that infinities won't happen, assume they won't.
3658   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3659     if (FPMathOp->hasNoInfs())
3660       return true;
3661 
3662   // Handle scalar constants.
3663   if (auto *CFP = dyn_cast<ConstantFP>(V))
3664     return !CFP->isInfinity();
3665 
3666   if (Depth == MaxAnalysisRecursionDepth)
3667     return false;
3668 
3669   if (auto *Inst = dyn_cast<Instruction>(V)) {
3670     switch (Inst->getOpcode()) {
3671     case Instruction::Select: {
3672       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3673              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3674     }
3675     case Instruction::SIToFP:
3676     case Instruction::UIToFP: {
3677       // Get width of largest magnitude integer (remove a bit if signed).
3678       // This still works for a signed minimum value because the largest FP
3679       // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3680       int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3681       if (Inst->getOpcode() == Instruction::SIToFP)
3682         --IntSize;
3683 
3684       // If the exponent of the largest finite FP value can hold the largest
3685       // integer, the result of the cast must be finite.
3686       Type *FPTy = Inst->getType()->getScalarType();
3687       return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3688     }
3689     default:
3690       break;
3691     }
3692   }
3693 
3694   // try to handle fixed width vector constants
3695   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3696   if (VFVTy && isa<Constant>(V)) {
3697     // For vectors, verify that each element is not infinity.
3698     unsigned NumElts = VFVTy->getNumElements();
3699     for (unsigned i = 0; i != NumElts; ++i) {
3700       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3701       if (!Elt)
3702         return false;
3703       if (isa<UndefValue>(Elt))
3704         continue;
3705       auto *CElt = dyn_cast<ConstantFP>(Elt);
3706       if (!CElt || CElt->isInfinity())
3707         return false;
3708     }
3709     // All elements were confirmed non-infinity or undefined.
3710     return true;
3711   }
3712 
3713   // was not able to prove that V never contains infinity
3714   return false;
3715 }
3716 
3717 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3718                            unsigned Depth) {
3719   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3720 
3721   // If we're told that NaNs won't happen, assume they won't.
3722   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3723     if (FPMathOp->hasNoNaNs())
3724       return true;
3725 
3726   // Handle scalar constants.
3727   if (auto *CFP = dyn_cast<ConstantFP>(V))
3728     return !CFP->isNaN();
3729 
3730   if (Depth == MaxAnalysisRecursionDepth)
3731     return false;
3732 
3733   if (auto *Inst = dyn_cast<Instruction>(V)) {
3734     switch (Inst->getOpcode()) {
3735     case Instruction::FAdd:
3736     case Instruction::FSub:
3737       // Adding positive and negative infinity produces NaN.
3738       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3739              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3740              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3741               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3742 
3743     case Instruction::FMul:
3744       // Zero multiplied with infinity produces NaN.
3745       // FIXME: If neither side can be zero fmul never produces NaN.
3746       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3747              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3748              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3749              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3750 
3751     case Instruction::FDiv:
3752     case Instruction::FRem:
3753       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3754       return false;
3755 
3756     case Instruction::Select: {
3757       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3758              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3759     }
3760     case Instruction::SIToFP:
3761     case Instruction::UIToFP:
3762       return true;
3763     case Instruction::FPTrunc:
3764     case Instruction::FPExt:
3765       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3766     default:
3767       break;
3768     }
3769   }
3770 
3771   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3772     switch (II->getIntrinsicID()) {
3773     case Intrinsic::canonicalize:
3774     case Intrinsic::fabs:
3775     case Intrinsic::copysign:
3776     case Intrinsic::exp:
3777     case Intrinsic::exp2:
3778     case Intrinsic::floor:
3779     case Intrinsic::ceil:
3780     case Intrinsic::trunc:
3781     case Intrinsic::rint:
3782     case Intrinsic::nearbyint:
3783     case Intrinsic::round:
3784     case Intrinsic::roundeven:
3785       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3786     case Intrinsic::sqrt:
3787       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3788              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3789     case Intrinsic::minnum:
3790     case Intrinsic::maxnum:
3791       // If either operand is not NaN, the result is not NaN.
3792       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3793              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3794     default:
3795       return false;
3796     }
3797   }
3798 
3799   // Try to handle fixed width vector constants
3800   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3801   if (VFVTy && isa<Constant>(V)) {
3802     // For vectors, verify that each element is not NaN.
3803     unsigned NumElts = VFVTy->getNumElements();
3804     for (unsigned i = 0; i != NumElts; ++i) {
3805       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3806       if (!Elt)
3807         return false;
3808       if (isa<UndefValue>(Elt))
3809         continue;
3810       auto *CElt = dyn_cast<ConstantFP>(Elt);
3811       if (!CElt || CElt->isNaN())
3812         return false;
3813     }
3814     // All elements were confirmed not-NaN or undefined.
3815     return true;
3816   }
3817 
3818   // Was not able to prove that V never contains NaN
3819   return false;
3820 }
3821 
3822 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3823 
3824   // All byte-wide stores are splatable, even of arbitrary variables.
3825   if (V->getType()->isIntegerTy(8))
3826     return V;
3827 
3828   LLVMContext &Ctx = V->getContext();
3829 
3830   // Undef don't care.
3831   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3832   if (isa<UndefValue>(V))
3833     return UndefInt8;
3834 
3835   // Return Undef for zero-sized type.
3836   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3837     return UndefInt8;
3838 
3839   Constant *C = dyn_cast<Constant>(V);
3840   if (!C) {
3841     // Conceptually, we could handle things like:
3842     //   %a = zext i8 %X to i16
3843     //   %b = shl i16 %a, 8
3844     //   %c = or i16 %a, %b
3845     // but until there is an example that actually needs this, it doesn't seem
3846     // worth worrying about.
3847     return nullptr;
3848   }
3849 
3850   // Handle 'null' ConstantArrayZero etc.
3851   if (C->isNullValue())
3852     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3853 
3854   // Constant floating-point values can be handled as integer values if the
3855   // corresponding integer value is "byteable".  An important case is 0.0.
3856   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3857     Type *Ty = nullptr;
3858     if (CFP->getType()->isHalfTy())
3859       Ty = Type::getInt16Ty(Ctx);
3860     else if (CFP->getType()->isFloatTy())
3861       Ty = Type::getInt32Ty(Ctx);
3862     else if (CFP->getType()->isDoubleTy())
3863       Ty = Type::getInt64Ty(Ctx);
3864     // Don't handle long double formats, which have strange constraints.
3865     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3866               : nullptr;
3867   }
3868 
3869   // We can handle constant integers that are multiple of 8 bits.
3870   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3871     if (CI->getBitWidth() % 8 == 0) {
3872       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3873       if (!CI->getValue().isSplat(8))
3874         return nullptr;
3875       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3876     }
3877   }
3878 
3879   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3880     if (CE->getOpcode() == Instruction::IntToPtr) {
3881       if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3882         unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3883         return isBytewiseValue(
3884             ConstantExpr::getIntegerCast(CE->getOperand(0),
3885                                          Type::getIntNTy(Ctx, BitWidth), false),
3886             DL);
3887       }
3888     }
3889   }
3890 
3891   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3892     if (LHS == RHS)
3893       return LHS;
3894     if (!LHS || !RHS)
3895       return nullptr;
3896     if (LHS == UndefInt8)
3897       return RHS;
3898     if (RHS == UndefInt8)
3899       return LHS;
3900     return nullptr;
3901   };
3902 
3903   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3904     Value *Val = UndefInt8;
3905     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3906       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3907         return nullptr;
3908     return Val;
3909   }
3910 
3911   if (isa<ConstantAggregate>(C)) {
3912     Value *Val = UndefInt8;
3913     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3914       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3915         return nullptr;
3916     return Val;
3917   }
3918 
3919   // Don't try to handle the handful of other constants.
3920   return nullptr;
3921 }
3922 
3923 // This is the recursive version of BuildSubAggregate. It takes a few different
3924 // arguments. Idxs is the index within the nested struct From that we are
3925 // looking at now (which is of type IndexedType). IdxSkip is the number of
3926 // indices from Idxs that should be left out when inserting into the resulting
3927 // struct. To is the result struct built so far, new insertvalue instructions
3928 // build on that.
3929 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3930                                 SmallVectorImpl<unsigned> &Idxs,
3931                                 unsigned IdxSkip,
3932                                 Instruction *InsertBefore) {
3933   StructType *STy = dyn_cast<StructType>(IndexedType);
3934   if (STy) {
3935     // Save the original To argument so we can modify it
3936     Value *OrigTo = To;
3937     // General case, the type indexed by Idxs is a struct
3938     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3939       // Process each struct element recursively
3940       Idxs.push_back(i);
3941       Value *PrevTo = To;
3942       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3943                              InsertBefore);
3944       Idxs.pop_back();
3945       if (!To) {
3946         // Couldn't find any inserted value for this index? Cleanup
3947         while (PrevTo != OrigTo) {
3948           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3949           PrevTo = Del->getAggregateOperand();
3950           Del->eraseFromParent();
3951         }
3952         // Stop processing elements
3953         break;
3954       }
3955     }
3956     // If we successfully found a value for each of our subaggregates
3957     if (To)
3958       return To;
3959   }
3960   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3961   // the struct's elements had a value that was inserted directly. In the latter
3962   // case, perhaps we can't determine each of the subelements individually, but
3963   // we might be able to find the complete struct somewhere.
3964 
3965   // Find the value that is at that particular spot
3966   Value *V = FindInsertedValue(From, Idxs);
3967 
3968   if (!V)
3969     return nullptr;
3970 
3971   // Insert the value in the new (sub) aggregate
3972   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3973                                  "tmp", InsertBefore);
3974 }
3975 
3976 // This helper takes a nested struct and extracts a part of it (which is again a
3977 // struct) into a new value. For example, given the struct:
3978 // { a, { b, { c, d }, e } }
3979 // and the indices "1, 1" this returns
3980 // { c, d }.
3981 //
3982 // It does this by inserting an insertvalue for each element in the resulting
3983 // struct, as opposed to just inserting a single struct. This will only work if
3984 // each of the elements of the substruct are known (ie, inserted into From by an
3985 // insertvalue instruction somewhere).
3986 //
3987 // All inserted insertvalue instructions are inserted before InsertBefore
3988 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3989                                 Instruction *InsertBefore) {
3990   assert(InsertBefore && "Must have someplace to insert!");
3991   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3992                                                              idx_range);
3993   Value *To = UndefValue::get(IndexedType);
3994   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3995   unsigned IdxSkip = Idxs.size();
3996 
3997   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3998 }
3999 
4000 /// Given an aggregate and a sequence of indices, see if the scalar value
4001 /// indexed is already around as a register, for example if it was inserted
4002 /// directly into the aggregate.
4003 ///
4004 /// If InsertBefore is not null, this function will duplicate (modified)
4005 /// insertvalues when a part of a nested struct is extracted.
4006 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
4007                                Instruction *InsertBefore) {
4008   // Nothing to index? Just return V then (this is useful at the end of our
4009   // recursion).
4010   if (idx_range.empty())
4011     return V;
4012   // We have indices, so V should have an indexable type.
4013   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
4014          "Not looking at a struct or array?");
4015   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
4016          "Invalid indices for type?");
4017 
4018   if (Constant *C = dyn_cast<Constant>(V)) {
4019     C = C->getAggregateElement(idx_range[0]);
4020     if (!C) return nullptr;
4021     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
4022   }
4023 
4024   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
4025     // Loop the indices for the insertvalue instruction in parallel with the
4026     // requested indices
4027     const unsigned *req_idx = idx_range.begin();
4028     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
4029          i != e; ++i, ++req_idx) {
4030       if (req_idx == idx_range.end()) {
4031         // We can't handle this without inserting insertvalues
4032         if (!InsertBefore)
4033           return nullptr;
4034 
4035         // The requested index identifies a part of a nested aggregate. Handle
4036         // this specially. For example,
4037         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
4038         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
4039         // %C = extractvalue {i32, { i32, i32 } } %B, 1
4040         // This can be changed into
4041         // %A = insertvalue {i32, i32 } undef, i32 10, 0
4042         // %C = insertvalue {i32, i32 } %A, i32 11, 1
4043         // which allows the unused 0,0 element from the nested struct to be
4044         // removed.
4045         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
4046                                  InsertBefore);
4047       }
4048 
4049       // This insert value inserts something else than what we are looking for.
4050       // See if the (aggregate) value inserted into has the value we are
4051       // looking for, then.
4052       if (*req_idx != *i)
4053         return FindInsertedValue(I->getAggregateOperand(), idx_range,
4054                                  InsertBefore);
4055     }
4056     // If we end up here, the indices of the insertvalue match with those
4057     // requested (though possibly only partially). Now we recursively look at
4058     // the inserted value, passing any remaining indices.
4059     return FindInsertedValue(I->getInsertedValueOperand(),
4060                              makeArrayRef(req_idx, idx_range.end()),
4061                              InsertBefore);
4062   }
4063 
4064   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4065     // If we're extracting a value from an aggregate that was extracted from
4066     // something else, we can extract from that something else directly instead.
4067     // However, we will need to chain I's indices with the requested indices.
4068 
4069     // Calculate the number of indices required
4070     unsigned size = I->getNumIndices() + idx_range.size();
4071     // Allocate some space to put the new indices in
4072     SmallVector<unsigned, 5> Idxs;
4073     Idxs.reserve(size);
4074     // Add indices from the extract value instruction
4075     Idxs.append(I->idx_begin(), I->idx_end());
4076 
4077     // Add requested indices
4078     Idxs.append(idx_range.begin(), idx_range.end());
4079 
4080     assert(Idxs.size() == size
4081            && "Number of indices added not correct?");
4082 
4083     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4084   }
4085   // Otherwise, we don't know (such as, extracting from a function return value
4086   // or load instruction)
4087   return nullptr;
4088 }
4089 
4090 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4091                                        unsigned CharSize) {
4092   // Make sure the GEP has exactly three arguments.
4093   if (GEP->getNumOperands() != 3)
4094     return false;
4095 
4096   // Make sure the index-ee is a pointer to array of \p CharSize integers.
4097   // CharSize.
4098   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4099   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4100     return false;
4101 
4102   // Check to make sure that the first operand of the GEP is an integer and
4103   // has value 0 so that we are sure we're indexing into the initializer.
4104   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4105   if (!FirstIdx || !FirstIdx->isZero())
4106     return false;
4107 
4108   return true;
4109 }
4110 
4111 bool llvm::getConstantDataArrayInfo(const Value *V,
4112                                     ConstantDataArraySlice &Slice,
4113                                     unsigned ElementSize, uint64_t Offset) {
4114   assert(V);
4115 
4116   // Look through bitcast instructions and geps.
4117   V = V->stripPointerCasts();
4118 
4119   // If the value is a GEP instruction or constant expression, treat it as an
4120   // offset.
4121   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4122     // The GEP operator should be based on a pointer to string constant, and is
4123     // indexing into the string constant.
4124     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
4125       return false;
4126 
4127     // If the second index isn't a ConstantInt, then this is a variable index
4128     // into the array.  If this occurs, we can't say anything meaningful about
4129     // the string.
4130     uint64_t StartIdx = 0;
4131     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
4132       StartIdx = CI->getZExtValue();
4133     else
4134       return false;
4135     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
4136                                     StartIdx + Offset);
4137   }
4138 
4139   // The GEP instruction, constant or instruction, must reference a global
4140   // variable that is a constant and is initialized. The referenced constant
4141   // initializer is the array that we'll use for optimization.
4142   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
4143   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4144     return false;
4145 
4146   const ConstantDataArray *Array;
4147   ArrayType *ArrayTy;
4148   if (GV->getInitializer()->isNullValue()) {
4149     Type *GVTy = GV->getValueType();
4150     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
4151       // A zeroinitializer for the array; there is no ConstantDataArray.
4152       Array = nullptr;
4153     } else {
4154       const DataLayout &DL = GV->getParent()->getDataLayout();
4155       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4156       uint64_t Length = SizeInBytes / (ElementSize / 8);
4157       if (Length <= Offset)
4158         return false;
4159 
4160       Slice.Array = nullptr;
4161       Slice.Offset = 0;
4162       Slice.Length = Length - Offset;
4163       return true;
4164     }
4165   } else {
4166     // This must be a ConstantDataArray.
4167     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4168     if (!Array)
4169       return false;
4170     ArrayTy = Array->getType();
4171   }
4172   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4173     return false;
4174 
4175   uint64_t NumElts = ArrayTy->getArrayNumElements();
4176   if (Offset > NumElts)
4177     return false;
4178 
4179   Slice.Array = Array;
4180   Slice.Offset = Offset;
4181   Slice.Length = NumElts - Offset;
4182   return true;
4183 }
4184 
4185 /// This function computes the length of a null-terminated C string pointed to
4186 /// by V. If successful, it returns true and returns the string in Str.
4187 /// If unsuccessful, it returns false.
4188 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4189                                  uint64_t Offset, bool TrimAtNul) {
4190   ConstantDataArraySlice Slice;
4191   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4192     return false;
4193 
4194   if (Slice.Array == nullptr) {
4195     if (TrimAtNul) {
4196       Str = StringRef();
4197       return true;
4198     }
4199     if (Slice.Length == 1) {
4200       Str = StringRef("", 1);
4201       return true;
4202     }
4203     // We cannot instantiate a StringRef as we do not have an appropriate string
4204     // of 0s at hand.
4205     return false;
4206   }
4207 
4208   // Start out with the entire array in the StringRef.
4209   Str = Slice.Array->getAsString();
4210   // Skip over 'offset' bytes.
4211   Str = Str.substr(Slice.Offset);
4212 
4213   if (TrimAtNul) {
4214     // Trim off the \0 and anything after it.  If the array is not nul
4215     // terminated, we just return the whole end of string.  The client may know
4216     // some other way that the string is length-bound.
4217     Str = Str.substr(0, Str.find('\0'));
4218   }
4219   return true;
4220 }
4221 
4222 // These next two are very similar to the above, but also look through PHI
4223 // nodes.
4224 // TODO: See if we can integrate these two together.
4225 
4226 /// If we can compute the length of the string pointed to by
4227 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4228 static uint64_t GetStringLengthH(const Value *V,
4229                                  SmallPtrSetImpl<const PHINode*> &PHIs,
4230                                  unsigned CharSize) {
4231   // Look through noop bitcast instructions.
4232   V = V->stripPointerCasts();
4233 
4234   // If this is a PHI node, there are two cases: either we have already seen it
4235   // or we haven't.
4236   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4237     if (!PHIs.insert(PN).second)
4238       return ~0ULL;  // already in the set.
4239 
4240     // If it was new, see if all the input strings are the same length.
4241     uint64_t LenSoFar = ~0ULL;
4242     for (Value *IncValue : PN->incoming_values()) {
4243       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4244       if (Len == 0) return 0; // Unknown length -> unknown.
4245 
4246       if (Len == ~0ULL) continue;
4247 
4248       if (Len != LenSoFar && LenSoFar != ~0ULL)
4249         return 0;    // Disagree -> unknown.
4250       LenSoFar = Len;
4251     }
4252 
4253     // Success, all agree.
4254     return LenSoFar;
4255   }
4256 
4257   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4258   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4259     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4260     if (Len1 == 0) return 0;
4261     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4262     if (Len2 == 0) return 0;
4263     if (Len1 == ~0ULL) return Len2;
4264     if (Len2 == ~0ULL) return Len1;
4265     if (Len1 != Len2) return 0;
4266     return Len1;
4267   }
4268 
4269   // Otherwise, see if we can read the string.
4270   ConstantDataArraySlice Slice;
4271   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4272     return 0;
4273 
4274   if (Slice.Array == nullptr)
4275     return 1;
4276 
4277   // Search for nul characters
4278   unsigned NullIndex = 0;
4279   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4280     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4281       break;
4282   }
4283 
4284   return NullIndex + 1;
4285 }
4286 
4287 /// If we can compute the length of the string pointed to by
4288 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4289 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4290   if (!V->getType()->isPointerTy())
4291     return 0;
4292 
4293   SmallPtrSet<const PHINode*, 32> PHIs;
4294   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4295   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4296   // an empty string as a length.
4297   return Len == ~0ULL ? 1 : Len;
4298 }
4299 
4300 const Value *
4301 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4302                                            bool MustPreserveNullness) {
4303   assert(Call &&
4304          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4305   if (const Value *RV = Call->getReturnedArgOperand())
4306     return RV;
4307   // This can be used only as a aliasing property.
4308   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4309           Call, MustPreserveNullness))
4310     return Call->getArgOperand(0);
4311   return nullptr;
4312 }
4313 
4314 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4315     const CallBase *Call, bool MustPreserveNullness) {
4316   switch (Call->getIntrinsicID()) {
4317   case Intrinsic::launder_invariant_group:
4318   case Intrinsic::strip_invariant_group:
4319   case Intrinsic::aarch64_irg:
4320   case Intrinsic::aarch64_tagp:
4321     return true;
4322   case Intrinsic::ptrmask:
4323     return !MustPreserveNullness;
4324   default:
4325     return false;
4326   }
4327 }
4328 
4329 /// \p PN defines a loop-variant pointer to an object.  Check if the
4330 /// previous iteration of the loop was referring to the same object as \p PN.
4331 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4332                                          const LoopInfo *LI) {
4333   // Find the loop-defined value.
4334   Loop *L = LI->getLoopFor(PN->getParent());
4335   if (PN->getNumIncomingValues() != 2)
4336     return true;
4337 
4338   // Find the value from previous iteration.
4339   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4340   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4341     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4342   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4343     return true;
4344 
4345   // If a new pointer is loaded in the loop, the pointer references a different
4346   // object in every iteration.  E.g.:
4347   //    for (i)
4348   //       int *p = a[i];
4349   //       ...
4350   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4351     if (!L->isLoopInvariant(Load->getPointerOperand()))
4352       return false;
4353   return true;
4354 }
4355 
4356 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4357   if (!V->getType()->isPointerTy())
4358     return V;
4359   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4360     if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4361       V = GEP->getPointerOperand();
4362     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4363                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4364       V = cast<Operator>(V)->getOperand(0);
4365       if (!V->getType()->isPointerTy())
4366         return V;
4367     } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4368       if (GA->isInterposable())
4369         return V;
4370       V = GA->getAliasee();
4371     } else {
4372       if (auto *PHI = dyn_cast<PHINode>(V)) {
4373         // Look through single-arg phi nodes created by LCSSA.
4374         if (PHI->getNumIncomingValues() == 1) {
4375           V = PHI->getIncomingValue(0);
4376           continue;
4377         }
4378       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4379         // CaptureTracking can know about special capturing properties of some
4380         // intrinsics like launder.invariant.group, that can't be expressed with
4381         // the attributes, but have properties like returning aliasing pointer.
4382         // Because some analysis may assume that nocaptured pointer is not
4383         // returned from some special intrinsic (because function would have to
4384         // be marked with returns attribute), it is crucial to use this function
4385         // because it should be in sync with CaptureTracking. Not using it may
4386         // cause weird miscompilations where 2 aliasing pointers are assumed to
4387         // noalias.
4388         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4389           V = RP;
4390           continue;
4391         }
4392       }
4393 
4394       return V;
4395     }
4396     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4397   }
4398   return V;
4399 }
4400 
4401 void llvm::getUnderlyingObjects(const Value *V,
4402                                 SmallVectorImpl<const Value *> &Objects,
4403                                 LoopInfo *LI, unsigned MaxLookup) {
4404   SmallPtrSet<const Value *, 4> Visited;
4405   SmallVector<const Value *, 4> Worklist;
4406   Worklist.push_back(V);
4407   do {
4408     const Value *P = Worklist.pop_back_val();
4409     P = getUnderlyingObject(P, MaxLookup);
4410 
4411     if (!Visited.insert(P).second)
4412       continue;
4413 
4414     if (auto *SI = dyn_cast<SelectInst>(P)) {
4415       Worklist.push_back(SI->getTrueValue());
4416       Worklist.push_back(SI->getFalseValue());
4417       continue;
4418     }
4419 
4420     if (auto *PN = dyn_cast<PHINode>(P)) {
4421       // If this PHI changes the underlying object in every iteration of the
4422       // loop, don't look through it.  Consider:
4423       //   int **A;
4424       //   for (i) {
4425       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4426       //     Curr = A[i];
4427       //     *Prev, *Curr;
4428       //
4429       // Prev is tracking Curr one iteration behind so they refer to different
4430       // underlying objects.
4431       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4432           isSameUnderlyingObjectInLoop(PN, LI))
4433         append_range(Worklist, PN->incoming_values());
4434       continue;
4435     }
4436 
4437     Objects.push_back(P);
4438   } while (!Worklist.empty());
4439 }
4440 
4441 /// This is the function that does the work of looking through basic
4442 /// ptrtoint+arithmetic+inttoptr sequences.
4443 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4444   do {
4445     if (const Operator *U = dyn_cast<Operator>(V)) {
4446       // If we find a ptrtoint, we can transfer control back to the
4447       // regular getUnderlyingObjectFromInt.
4448       if (U->getOpcode() == Instruction::PtrToInt)
4449         return U->getOperand(0);
4450       // If we find an add of a constant, a multiplied value, or a phi, it's
4451       // likely that the other operand will lead us to the base
4452       // object. We don't have to worry about the case where the
4453       // object address is somehow being computed by the multiply,
4454       // because our callers only care when the result is an
4455       // identifiable object.
4456       if (U->getOpcode() != Instruction::Add ||
4457           (!isa<ConstantInt>(U->getOperand(1)) &&
4458            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4459            !isa<PHINode>(U->getOperand(1))))
4460         return V;
4461       V = U->getOperand(0);
4462     } else {
4463       return V;
4464     }
4465     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4466   } while (true);
4467 }
4468 
4469 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4470 /// ptrtoint+arithmetic+inttoptr sequences.
4471 /// It returns false if unidentified object is found in getUnderlyingObjects.
4472 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4473                                           SmallVectorImpl<Value *> &Objects) {
4474   SmallPtrSet<const Value *, 16> Visited;
4475   SmallVector<const Value *, 4> Working(1, V);
4476   do {
4477     V = Working.pop_back_val();
4478 
4479     SmallVector<const Value *, 4> Objs;
4480     getUnderlyingObjects(V, Objs);
4481 
4482     for (const Value *V : Objs) {
4483       if (!Visited.insert(V).second)
4484         continue;
4485       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4486         const Value *O =
4487           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4488         if (O->getType()->isPointerTy()) {
4489           Working.push_back(O);
4490           continue;
4491         }
4492       }
4493       // If getUnderlyingObjects fails to find an identifiable object,
4494       // getUnderlyingObjectsForCodeGen also fails for safety.
4495       if (!isIdentifiedObject(V)) {
4496         Objects.clear();
4497         return false;
4498       }
4499       Objects.push_back(const_cast<Value *>(V));
4500     }
4501   } while (!Working.empty());
4502   return true;
4503 }
4504 
4505 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4506   AllocaInst *Result = nullptr;
4507   SmallPtrSet<Value *, 4> Visited;
4508   SmallVector<Value *, 4> Worklist;
4509 
4510   auto AddWork = [&](Value *V) {
4511     if (Visited.insert(V).second)
4512       Worklist.push_back(V);
4513   };
4514 
4515   AddWork(V);
4516   do {
4517     V = Worklist.pop_back_val();
4518     assert(Visited.count(V));
4519 
4520     if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4521       if (Result && Result != AI)
4522         return nullptr;
4523       Result = AI;
4524     } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4525       AddWork(CI->getOperand(0));
4526     } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4527       for (Value *IncValue : PN->incoming_values())
4528         AddWork(IncValue);
4529     } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4530       AddWork(SI->getTrueValue());
4531       AddWork(SI->getFalseValue());
4532     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4533       if (OffsetZero && !GEP->hasAllZeroIndices())
4534         return nullptr;
4535       AddWork(GEP->getPointerOperand());
4536     } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
4537       Value *Returned = CB->getReturnedArgOperand();
4538       if (Returned)
4539         AddWork(Returned);
4540       else
4541         return nullptr;
4542     } else {
4543       return nullptr;
4544     }
4545   } while (!Worklist.empty());
4546 
4547   return Result;
4548 }
4549 
4550 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4551     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4552   for (const User *U : V->users()) {
4553     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4554     if (!II)
4555       return false;
4556 
4557     if (AllowLifetime && II->isLifetimeStartOrEnd())
4558       continue;
4559 
4560     if (AllowDroppable && II->isDroppable())
4561       continue;
4562 
4563     return false;
4564   }
4565   return true;
4566 }
4567 
4568 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4569   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4570       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4571 }
4572 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4573   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4574       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4575 }
4576 
4577 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4578   if (!LI.isUnordered())
4579     return true;
4580   const Function &F = *LI.getFunction();
4581   // Speculative load may create a race that did not exist in the source.
4582   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4583     // Speculative load may load data from dirty regions.
4584     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4585     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4586 }
4587 
4588 
4589 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4590                                         const Instruction *CtxI,
4591                                         const DominatorTree *DT,
4592                                         const TargetLibraryInfo *TLI) {
4593   const Operator *Inst = dyn_cast<Operator>(V);
4594   if (!Inst)
4595     return false;
4596 
4597   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4598     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4599       if (C->canTrap())
4600         return false;
4601 
4602   switch (Inst->getOpcode()) {
4603   default:
4604     return true;
4605   case Instruction::UDiv:
4606   case Instruction::URem: {
4607     // x / y is undefined if y == 0.
4608     const APInt *V;
4609     if (match(Inst->getOperand(1), m_APInt(V)))
4610       return *V != 0;
4611     return false;
4612   }
4613   case Instruction::SDiv:
4614   case Instruction::SRem: {
4615     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4616     const APInt *Numerator, *Denominator;
4617     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4618       return false;
4619     // We cannot hoist this division if the denominator is 0.
4620     if (*Denominator == 0)
4621       return false;
4622     // It's safe to hoist if the denominator is not 0 or -1.
4623     if (!Denominator->isAllOnesValue())
4624       return true;
4625     // At this point we know that the denominator is -1.  It is safe to hoist as
4626     // long we know that the numerator is not INT_MIN.
4627     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4628       return !Numerator->isMinSignedValue();
4629     // The numerator *might* be MinSignedValue.
4630     return false;
4631   }
4632   case Instruction::Load: {
4633     const LoadInst *LI = cast<LoadInst>(Inst);
4634     if (mustSuppressSpeculation(*LI))
4635       return false;
4636     const DataLayout &DL = LI->getModule()->getDataLayout();
4637     return isDereferenceableAndAlignedPointer(
4638         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4639         DL, CtxI, DT, TLI);
4640   }
4641   case Instruction::Call: {
4642     auto *CI = cast<const CallInst>(Inst);
4643     const Function *Callee = CI->getCalledFunction();
4644 
4645     // The called function could have undefined behavior or side-effects, even
4646     // if marked readnone nounwind.
4647     return Callee && Callee->isSpeculatable();
4648   }
4649   case Instruction::VAArg:
4650   case Instruction::Alloca:
4651   case Instruction::Invoke:
4652   case Instruction::CallBr:
4653   case Instruction::PHI:
4654   case Instruction::Store:
4655   case Instruction::Ret:
4656   case Instruction::Br:
4657   case Instruction::IndirectBr:
4658   case Instruction::Switch:
4659   case Instruction::Unreachable:
4660   case Instruction::Fence:
4661   case Instruction::AtomicRMW:
4662   case Instruction::AtomicCmpXchg:
4663   case Instruction::LandingPad:
4664   case Instruction::Resume:
4665   case Instruction::CatchSwitch:
4666   case Instruction::CatchPad:
4667   case Instruction::CatchRet:
4668   case Instruction::CleanupPad:
4669   case Instruction::CleanupRet:
4670     return false; // Misc instructions which have effects
4671   }
4672 }
4673 
4674 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4675   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4676 }
4677 
4678 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4679 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4680   switch (OR) {
4681     case ConstantRange::OverflowResult::MayOverflow:
4682       return OverflowResult::MayOverflow;
4683     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4684       return OverflowResult::AlwaysOverflowsLow;
4685     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4686       return OverflowResult::AlwaysOverflowsHigh;
4687     case ConstantRange::OverflowResult::NeverOverflows:
4688       return OverflowResult::NeverOverflows;
4689   }
4690   llvm_unreachable("Unknown OverflowResult");
4691 }
4692 
4693 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4694 static ConstantRange computeConstantRangeIncludingKnownBits(
4695     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4696     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4697     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4698   KnownBits Known = computeKnownBits(
4699       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4700   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4701   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4702   ConstantRange::PreferredRangeType RangeType =
4703       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4704   return CR1.intersectWith(CR2, RangeType);
4705 }
4706 
4707 OverflowResult llvm::computeOverflowForUnsignedMul(
4708     const Value *LHS, const Value *RHS, const DataLayout &DL,
4709     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4710     bool UseInstrInfo) {
4711   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4712                                         nullptr, UseInstrInfo);
4713   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4714                                         nullptr, UseInstrInfo);
4715   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4716   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4717   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4718 }
4719 
4720 OverflowResult
4721 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4722                                   const DataLayout &DL, AssumptionCache *AC,
4723                                   const Instruction *CxtI,
4724                                   const DominatorTree *DT, bool UseInstrInfo) {
4725   // Multiplying n * m significant bits yields a result of n + m significant
4726   // bits. If the total number of significant bits does not exceed the
4727   // result bit width (minus 1), there is no overflow.
4728   // This means if we have enough leading sign bits in the operands
4729   // we can guarantee that the result does not overflow.
4730   // Ref: "Hacker's Delight" by Henry Warren
4731   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4732 
4733   // Note that underestimating the number of sign bits gives a more
4734   // conservative answer.
4735   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4736                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4737 
4738   // First handle the easy case: if we have enough sign bits there's
4739   // definitely no overflow.
4740   if (SignBits > BitWidth + 1)
4741     return OverflowResult::NeverOverflows;
4742 
4743   // There are two ambiguous cases where there can be no overflow:
4744   //   SignBits == BitWidth + 1    and
4745   //   SignBits == BitWidth
4746   // The second case is difficult to check, therefore we only handle the
4747   // first case.
4748   if (SignBits == BitWidth + 1) {
4749     // It overflows only when both arguments are negative and the true
4750     // product is exactly the minimum negative number.
4751     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4752     // For simplicity we just check if at least one side is not negative.
4753     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4754                                           nullptr, UseInstrInfo);
4755     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4756                                           nullptr, UseInstrInfo);
4757     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4758       return OverflowResult::NeverOverflows;
4759   }
4760   return OverflowResult::MayOverflow;
4761 }
4762 
4763 OverflowResult llvm::computeOverflowForUnsignedAdd(
4764     const Value *LHS, const Value *RHS, const DataLayout &DL,
4765     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4766     bool UseInstrInfo) {
4767   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4768       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4769       nullptr, UseInstrInfo);
4770   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4771       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4772       nullptr, UseInstrInfo);
4773   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4774 }
4775 
4776 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4777                                                   const Value *RHS,
4778                                                   const AddOperator *Add,
4779                                                   const DataLayout &DL,
4780                                                   AssumptionCache *AC,
4781                                                   const Instruction *CxtI,
4782                                                   const DominatorTree *DT) {
4783   if (Add && Add->hasNoSignedWrap()) {
4784     return OverflowResult::NeverOverflows;
4785   }
4786 
4787   // If LHS and RHS each have at least two sign bits, the addition will look
4788   // like
4789   //
4790   // XX..... +
4791   // YY.....
4792   //
4793   // If the carry into the most significant position is 0, X and Y can't both
4794   // be 1 and therefore the carry out of the addition is also 0.
4795   //
4796   // If the carry into the most significant position is 1, X and Y can't both
4797   // be 0 and therefore the carry out of the addition is also 1.
4798   //
4799   // Since the carry into the most significant position is always equal to
4800   // the carry out of the addition, there is no signed overflow.
4801   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4802       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4803     return OverflowResult::NeverOverflows;
4804 
4805   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4806       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4807   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4808       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4809   OverflowResult OR =
4810       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4811   if (OR != OverflowResult::MayOverflow)
4812     return OR;
4813 
4814   // The remaining code needs Add to be available. Early returns if not so.
4815   if (!Add)
4816     return OverflowResult::MayOverflow;
4817 
4818   // If the sign of Add is the same as at least one of the operands, this add
4819   // CANNOT overflow. If this can be determined from the known bits of the
4820   // operands the above signedAddMayOverflow() check will have already done so.
4821   // The only other way to improve on the known bits is from an assumption, so
4822   // call computeKnownBitsFromAssume() directly.
4823   bool LHSOrRHSKnownNonNegative =
4824       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4825   bool LHSOrRHSKnownNegative =
4826       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4827   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4828     KnownBits AddKnown(LHSRange.getBitWidth());
4829     computeKnownBitsFromAssume(
4830         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4831     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4832         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4833       return OverflowResult::NeverOverflows;
4834   }
4835 
4836   return OverflowResult::MayOverflow;
4837 }
4838 
4839 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4840                                                    const Value *RHS,
4841                                                    const DataLayout &DL,
4842                                                    AssumptionCache *AC,
4843                                                    const Instruction *CxtI,
4844                                                    const DominatorTree *DT) {
4845   // Checking for conditions implied by dominating conditions may be expensive.
4846   // Limit it to usub_with_overflow calls for now.
4847   if (match(CxtI,
4848             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4849     if (auto C =
4850             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4851       if (*C)
4852         return OverflowResult::NeverOverflows;
4853       return OverflowResult::AlwaysOverflowsLow;
4854     }
4855   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4856       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4857   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4858       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4859   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4860 }
4861 
4862 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4863                                                  const Value *RHS,
4864                                                  const DataLayout &DL,
4865                                                  AssumptionCache *AC,
4866                                                  const Instruction *CxtI,
4867                                                  const DominatorTree *DT) {
4868   // If LHS and RHS each have at least two sign bits, the subtraction
4869   // cannot overflow.
4870   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4871       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4872     return OverflowResult::NeverOverflows;
4873 
4874   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4875       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4876   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4877       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4878   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4879 }
4880 
4881 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4882                                      const DominatorTree &DT) {
4883   SmallVector<const BranchInst *, 2> GuardingBranches;
4884   SmallVector<const ExtractValueInst *, 2> Results;
4885 
4886   for (const User *U : WO->users()) {
4887     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4888       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4889 
4890       if (EVI->getIndices()[0] == 0)
4891         Results.push_back(EVI);
4892       else {
4893         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4894 
4895         for (const auto *U : EVI->users())
4896           if (const auto *B = dyn_cast<BranchInst>(U)) {
4897             assert(B->isConditional() && "How else is it using an i1?");
4898             GuardingBranches.push_back(B);
4899           }
4900       }
4901     } else {
4902       // We are using the aggregate directly in a way we don't want to analyze
4903       // here (storing it to a global, say).
4904       return false;
4905     }
4906   }
4907 
4908   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4909     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4910     if (!NoWrapEdge.isSingleEdge())
4911       return false;
4912 
4913     // Check if all users of the add are provably no-wrap.
4914     for (const auto *Result : Results) {
4915       // If the extractvalue itself is not executed on overflow, the we don't
4916       // need to check each use separately, since domination is transitive.
4917       if (DT.dominates(NoWrapEdge, Result->getParent()))
4918         continue;
4919 
4920       for (auto &RU : Result->uses())
4921         if (!DT.dominates(NoWrapEdge, RU))
4922           return false;
4923     }
4924 
4925     return true;
4926   };
4927 
4928   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4929 }
4930 
4931 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4932   // See whether I has flags that may create poison
4933   if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4934     if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4935       return true;
4936   }
4937   if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4938     if (ExactOp->isExact())
4939       return true;
4940   if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4941     auto FMF = FP->getFastMathFlags();
4942     if (FMF.noNaNs() || FMF.noInfs())
4943       return true;
4944   }
4945 
4946   unsigned Opcode = Op->getOpcode();
4947 
4948   // Check whether opcode is a poison/undef-generating operation
4949   switch (Opcode) {
4950   case Instruction::Shl:
4951   case Instruction::AShr:
4952   case Instruction::LShr: {
4953     // Shifts return poison if shiftwidth is larger than the bitwidth.
4954     if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4955       SmallVector<Constant *, 4> ShiftAmounts;
4956       if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4957         unsigned NumElts = FVTy->getNumElements();
4958         for (unsigned i = 0; i < NumElts; ++i)
4959           ShiftAmounts.push_back(C->getAggregateElement(i));
4960       } else if (isa<ScalableVectorType>(C->getType()))
4961         return true; // Can't tell, just return true to be safe
4962       else
4963         ShiftAmounts.push_back(C);
4964 
4965       bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4966         auto *CI = dyn_cast_or_null<ConstantInt>(C);
4967         return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4968       });
4969       return !Safe;
4970     }
4971     return true;
4972   }
4973   case Instruction::FPToSI:
4974   case Instruction::FPToUI:
4975     // fptosi/ui yields poison if the resulting value does not fit in the
4976     // destination type.
4977     return true;
4978   case Instruction::Call:
4979     if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
4980       switch (II->getIntrinsicID()) {
4981       // TODO: Add more intrinsics.
4982       case Intrinsic::ctpop:
4983       case Intrinsic::sadd_with_overflow:
4984       case Intrinsic::ssub_with_overflow:
4985       case Intrinsic::smul_with_overflow:
4986       case Intrinsic::uadd_with_overflow:
4987       case Intrinsic::usub_with_overflow:
4988       case Intrinsic::umul_with_overflow:
4989         return false;
4990       }
4991     }
4992     LLVM_FALLTHROUGH;
4993   case Instruction::CallBr:
4994   case Instruction::Invoke: {
4995     const auto *CB = cast<CallBase>(Op);
4996     return !CB->hasRetAttr(Attribute::NoUndef);
4997   }
4998   case Instruction::InsertElement:
4999   case Instruction::ExtractElement: {
5000     // If index exceeds the length of the vector, it returns poison
5001     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
5002     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
5003     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
5004     if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
5005       return true;
5006     return false;
5007   }
5008   case Instruction::ShuffleVector: {
5009     // shufflevector may return undef.
5010     if (PoisonOnly)
5011       return false;
5012     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
5013                              ? cast<ConstantExpr>(Op)->getShuffleMask()
5014                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
5015     return is_contained(Mask, UndefMaskElem);
5016   }
5017   case Instruction::FNeg:
5018   case Instruction::PHI:
5019   case Instruction::Select:
5020   case Instruction::URem:
5021   case Instruction::SRem:
5022   case Instruction::ExtractValue:
5023   case Instruction::InsertValue:
5024   case Instruction::Freeze:
5025   case Instruction::ICmp:
5026   case Instruction::FCmp:
5027     return false;
5028   case Instruction::GetElementPtr: {
5029     const auto *GEP = cast<GEPOperator>(Op);
5030     return GEP->isInBounds();
5031   }
5032   default: {
5033     const auto *CE = dyn_cast<ConstantExpr>(Op);
5034     if (isa<CastInst>(Op) || (CE && CE->isCast()))
5035       return false;
5036     else if (Instruction::isBinaryOp(Opcode))
5037       return false;
5038     // Be conservative and return true.
5039     return true;
5040   }
5041   }
5042 }
5043 
5044 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
5045   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
5046 }
5047 
5048 bool llvm::canCreatePoison(const Operator *Op) {
5049   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
5050 }
5051 
5052 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
5053                                   const Value *V, unsigned Depth) {
5054   if (ValAssumedPoison == V)
5055     return true;
5056 
5057   const unsigned MaxDepth = 2;
5058   if (Depth >= MaxDepth)
5059     return false;
5060 
5061   if (const auto *I = dyn_cast<Instruction>(V)) {
5062     if (propagatesPoison(cast<Operator>(I)))
5063       return any_of(I->operands(), [=](const Value *Op) {
5064         return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
5065       });
5066 
5067     // 'select ValAssumedPoison, _, _' is poison.
5068     if (const auto *SI = dyn_cast<SelectInst>(I))
5069       return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
5070                                    Depth + 1);
5071     // V  = extractvalue V0, idx
5072     // V2 = extractvalue V0, idx2
5073     // V0's elements are all poison or not. (e.g., add_with_overflow)
5074     const WithOverflowInst *II;
5075     if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
5076         (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
5077          llvm::is_contained(II->arg_operands(), ValAssumedPoison)))
5078       return true;
5079   }
5080   return false;
5081 }
5082 
5083 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
5084                           unsigned Depth) {
5085   if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
5086     return true;
5087 
5088   if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
5089     return true;
5090 
5091   const unsigned MaxDepth = 2;
5092   if (Depth >= MaxDepth)
5093     return false;
5094 
5095   const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
5096   if (I && !canCreatePoison(cast<Operator>(I))) {
5097     return all_of(I->operands(), [=](const Value *Op) {
5098       return impliesPoison(Op, V, Depth + 1);
5099     });
5100   }
5101   return false;
5102 }
5103 
5104 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
5105   return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
5106 }
5107 
5108 static bool programUndefinedIfUndefOrPoison(const Value *V,
5109                                             bool PoisonOnly);
5110 
5111 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
5112                                              AssumptionCache *AC,
5113                                              const Instruction *CtxI,
5114                                              const DominatorTree *DT,
5115                                              unsigned Depth, bool PoisonOnly) {
5116   if (Depth >= MaxAnalysisRecursionDepth)
5117     return false;
5118 
5119   if (isa<MetadataAsValue>(V))
5120     return false;
5121 
5122   if (const auto *A = dyn_cast<Argument>(V)) {
5123     if (A->hasAttribute(Attribute::NoUndef))
5124       return true;
5125   }
5126 
5127   if (auto *C = dyn_cast<Constant>(V)) {
5128     if (isa<UndefValue>(C))
5129       return PoisonOnly && !isa<PoisonValue>(C);
5130 
5131     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
5132         isa<ConstantPointerNull>(C) || isa<Function>(C))
5133       return true;
5134 
5135     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
5136       return (PoisonOnly ? !C->containsPoisonElement()
5137                          : !C->containsUndefOrPoisonElement()) &&
5138              !C->containsConstantExpression();
5139   }
5140 
5141   // Strip cast operations from a pointer value.
5142   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
5143   // inbounds with zero offset. To guarantee that the result isn't poison, the
5144   // stripped pointer is checked as it has to be pointing into an allocated
5145   // object or be null `null` to ensure `inbounds` getelement pointers with a
5146   // zero offset could not produce poison.
5147   // It can strip off addrspacecast that do not change bit representation as
5148   // well. We believe that such addrspacecast is equivalent to no-op.
5149   auto *StrippedV = V->stripPointerCastsSameRepresentation();
5150   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5151       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5152     return true;
5153 
5154   auto OpCheck = [&](const Value *V) {
5155     return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5156                                             PoisonOnly);
5157   };
5158 
5159   if (auto *Opr = dyn_cast<Operator>(V)) {
5160     // If the value is a freeze instruction, then it can never
5161     // be undef or poison.
5162     if (isa<FreezeInst>(V))
5163       return true;
5164 
5165     if (const auto *CB = dyn_cast<CallBase>(V)) {
5166       if (CB->hasRetAttr(Attribute::NoUndef))
5167         return true;
5168     }
5169 
5170     if (const auto *PN = dyn_cast<PHINode>(V)) {
5171       unsigned Num = PN->getNumIncomingValues();
5172       bool IsWellDefined = true;
5173       for (unsigned i = 0; i < Num; ++i) {
5174         auto *TI = PN->getIncomingBlock(i)->getTerminator();
5175         if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5176                                               DT, Depth + 1, PoisonOnly)) {
5177           IsWellDefined = false;
5178           break;
5179         }
5180       }
5181       if (IsWellDefined)
5182         return true;
5183     } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5184       return true;
5185   }
5186 
5187   if (auto *I = dyn_cast<LoadInst>(V))
5188     if (I->getMetadata(LLVMContext::MD_noundef))
5189       return true;
5190 
5191   if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5192     return true;
5193 
5194   // CxtI may be null or a cloned instruction.
5195   if (!CtxI || !CtxI->getParent() || !DT)
5196     return false;
5197 
5198   auto *DNode = DT->getNode(CtxI->getParent());
5199   if (!DNode)
5200     // Unreachable block
5201     return false;
5202 
5203   // If V is used as a branch condition before reaching CtxI, V cannot be
5204   // undef or poison.
5205   //   br V, BB1, BB2
5206   // BB1:
5207   //   CtxI ; V cannot be undef or poison here
5208   auto *Dominator = DNode->getIDom();
5209   while (Dominator) {
5210     auto *TI = Dominator->getBlock()->getTerminator();
5211 
5212     Value *Cond = nullptr;
5213     if (auto BI = dyn_cast<BranchInst>(TI)) {
5214       if (BI->isConditional())
5215         Cond = BI->getCondition();
5216     } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
5217       Cond = SI->getCondition();
5218     }
5219 
5220     if (Cond) {
5221       if (Cond == V)
5222         return true;
5223       else if (PoisonOnly && isa<Operator>(Cond)) {
5224         // For poison, we can analyze further
5225         auto *Opr = cast<Operator>(Cond);
5226         if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5227           return true;
5228       }
5229     }
5230 
5231     Dominator = Dominator->getIDom();
5232   }
5233 
5234   SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
5235   if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
5236     return true;
5237 
5238   return false;
5239 }
5240 
5241 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5242                                             const Instruction *CtxI,
5243                                             const DominatorTree *DT,
5244                                             unsigned Depth) {
5245   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5246 }
5247 
5248 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5249                                      const Instruction *CtxI,
5250                                      const DominatorTree *DT, unsigned Depth) {
5251   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5252 }
5253 
5254 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5255                                                  const DataLayout &DL,
5256                                                  AssumptionCache *AC,
5257                                                  const Instruction *CxtI,
5258                                                  const DominatorTree *DT) {
5259   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5260                                        Add, DL, AC, CxtI, DT);
5261 }
5262 
5263 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5264                                                  const Value *RHS,
5265                                                  const DataLayout &DL,
5266                                                  AssumptionCache *AC,
5267                                                  const Instruction *CxtI,
5268                                                  const DominatorTree *DT) {
5269   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5270 }
5271 
5272 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5273   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5274   // of time because it's possible for another thread to interfere with it for an
5275   // arbitrary length of time, but programs aren't allowed to rely on that.
5276 
5277   // If there is no successor, then execution can't transfer to it.
5278   if (isa<ReturnInst>(I))
5279     return false;
5280   if (isa<UnreachableInst>(I))
5281     return false;
5282 
5283   // Note: Do not add new checks here; instead, change Instruction::mayThrow or
5284   // Instruction::willReturn.
5285   //
5286   // FIXME: Move this check into Instruction::willReturn.
5287   if (isa<CatchPadInst>(I)) {
5288     switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
5289     default:
5290       // A catchpad may invoke exception object constructors and such, which
5291       // in some languages can be arbitrary code, so be conservative by default.
5292       return false;
5293     case EHPersonality::CoreCLR:
5294       // For CoreCLR, it just involves a type test.
5295       return true;
5296     }
5297   }
5298 
5299   // An instruction that returns without throwing must transfer control flow
5300   // to a successor.
5301   return !I->mayThrow() && I->willReturn();
5302 }
5303 
5304 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5305   // TODO: This is slightly conservative for invoke instruction since exiting
5306   // via an exception *is* normal control for them.
5307   for (const Instruction &I : *BB)
5308     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5309       return false;
5310   return true;
5311 }
5312 
5313 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5314                                                   const Loop *L) {
5315   // The loop header is guaranteed to be executed for every iteration.
5316   //
5317   // FIXME: Relax this constraint to cover all basic blocks that are
5318   // guaranteed to be executed at every iteration.
5319   if (I->getParent() != L->getHeader()) return false;
5320 
5321   for (const Instruction &LI : *L->getHeader()) {
5322     if (&LI == I) return true;
5323     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5324   }
5325   llvm_unreachable("Instruction not contained in its own parent basic block.");
5326 }
5327 
5328 bool llvm::propagatesPoison(const Operator *I) {
5329   switch (I->getOpcode()) {
5330   case Instruction::Freeze:
5331   case Instruction::Select:
5332   case Instruction::PHI:
5333   case Instruction::Invoke:
5334     return false;
5335   case Instruction::Call:
5336     if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5337       switch (II->getIntrinsicID()) {
5338       // TODO: Add more intrinsics.
5339       case Intrinsic::sadd_with_overflow:
5340       case Intrinsic::ssub_with_overflow:
5341       case Intrinsic::smul_with_overflow:
5342       case Intrinsic::uadd_with_overflow:
5343       case Intrinsic::usub_with_overflow:
5344       case Intrinsic::umul_with_overflow:
5345         // If an input is a vector containing a poison element, the
5346         // two output vectors (calculated results, overflow bits)'
5347         // corresponding lanes are poison.
5348         return true;
5349       case Intrinsic::ctpop:
5350         return true;
5351       }
5352     }
5353     return false;
5354   case Instruction::ICmp:
5355   case Instruction::FCmp:
5356   case Instruction::GetElementPtr:
5357     return true;
5358   default:
5359     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5360       return true;
5361 
5362     // Be conservative and return false.
5363     return false;
5364   }
5365 }
5366 
5367 void llvm::getGuaranteedWellDefinedOps(
5368     const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5369   switch (I->getOpcode()) {
5370     case Instruction::Store:
5371       Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5372       break;
5373 
5374     case Instruction::Load:
5375       Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5376       break;
5377 
5378     // Since dereferenceable attribute imply noundef, atomic operations
5379     // also implicitly have noundef pointers too
5380     case Instruction::AtomicCmpXchg:
5381       Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5382       break;
5383 
5384     case Instruction::AtomicRMW:
5385       Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5386       break;
5387 
5388     case Instruction::Call:
5389     case Instruction::Invoke: {
5390       const CallBase *CB = cast<CallBase>(I);
5391       if (CB->isIndirectCall())
5392         Operands.insert(CB->getCalledOperand());
5393       for (unsigned i = 0; i < CB->arg_size(); ++i) {
5394         if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5395             CB->paramHasAttr(i, Attribute::Dereferenceable))
5396           Operands.insert(CB->getArgOperand(i));
5397       }
5398       break;
5399     }
5400 
5401     default:
5402       break;
5403   }
5404 }
5405 
5406 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5407                                      SmallPtrSetImpl<const Value *> &Operands) {
5408   getGuaranteedWellDefinedOps(I, Operands);
5409   switch (I->getOpcode()) {
5410   // Divisors of these operations are allowed to be partially undef.
5411   case Instruction::UDiv:
5412   case Instruction::SDiv:
5413   case Instruction::URem:
5414   case Instruction::SRem:
5415     Operands.insert(I->getOperand(1));
5416     break;
5417 
5418   default:
5419     break;
5420   }
5421 }
5422 
5423 bool llvm::mustTriggerUB(const Instruction *I,
5424                          const SmallSet<const Value *, 16>& KnownPoison) {
5425   SmallPtrSet<const Value *, 4> NonPoisonOps;
5426   getGuaranteedNonPoisonOps(I, NonPoisonOps);
5427 
5428   for (const auto *V : NonPoisonOps)
5429     if (KnownPoison.count(V))
5430       return true;
5431 
5432   return false;
5433 }
5434 
5435 static bool programUndefinedIfUndefOrPoison(const Value *V,
5436                                             bool PoisonOnly) {
5437   // We currently only look for uses of values within the same basic
5438   // block, as that makes it easier to guarantee that the uses will be
5439   // executed given that Inst is executed.
5440   //
5441   // FIXME: Expand this to consider uses beyond the same basic block. To do
5442   // this, look out for the distinction between post-dominance and strong
5443   // post-dominance.
5444   const BasicBlock *BB = nullptr;
5445   BasicBlock::const_iterator Begin;
5446   if (const auto *Inst = dyn_cast<Instruction>(V)) {
5447     BB = Inst->getParent();
5448     Begin = Inst->getIterator();
5449     Begin++;
5450   } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5451     BB = &Arg->getParent()->getEntryBlock();
5452     Begin = BB->begin();
5453   } else {
5454     return false;
5455   }
5456 
5457   // Limit number of instructions we look at, to avoid scanning through large
5458   // blocks. The current limit is chosen arbitrarily.
5459   unsigned ScanLimit = 32;
5460   BasicBlock::const_iterator End = BB->end();
5461 
5462   if (!PoisonOnly) {
5463     // Since undef does not propagate eagerly, be conservative & just check
5464     // whether a value is directly passed to an instruction that must take
5465     // well-defined operands.
5466 
5467     for (auto &I : make_range(Begin, End)) {
5468       if (isa<DbgInfoIntrinsic>(I))
5469         continue;
5470       if (--ScanLimit == 0)
5471         break;
5472 
5473       SmallPtrSet<const Value *, 4> WellDefinedOps;
5474       getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5475       if (WellDefinedOps.contains(V))
5476         return true;
5477 
5478       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5479         break;
5480     }
5481     return false;
5482   }
5483 
5484   // Set of instructions that we have proved will yield poison if Inst
5485   // does.
5486   SmallSet<const Value *, 16> YieldsPoison;
5487   SmallSet<const BasicBlock *, 4> Visited;
5488 
5489   YieldsPoison.insert(V);
5490   auto Propagate = [&](const User *User) {
5491     if (propagatesPoison(cast<Operator>(User)))
5492       YieldsPoison.insert(User);
5493   };
5494   for_each(V->users(), Propagate);
5495   Visited.insert(BB);
5496 
5497   while (true) {
5498     for (auto &I : make_range(Begin, End)) {
5499       if (isa<DbgInfoIntrinsic>(I))
5500         continue;
5501       if (--ScanLimit == 0)
5502         return false;
5503       if (mustTriggerUB(&I, YieldsPoison))
5504         return true;
5505       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5506         return false;
5507 
5508       // Mark poison that propagates from I through uses of I.
5509       if (YieldsPoison.count(&I))
5510         for_each(I.users(), Propagate);
5511     }
5512 
5513     BB = BB->getSingleSuccessor();
5514     if (!BB || !Visited.insert(BB).second)
5515       break;
5516 
5517     Begin = BB->getFirstNonPHI()->getIterator();
5518     End = BB->end();
5519   }
5520   return false;
5521 }
5522 
5523 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5524   return ::programUndefinedIfUndefOrPoison(Inst, false);
5525 }
5526 
5527 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5528   return ::programUndefinedIfUndefOrPoison(Inst, true);
5529 }
5530 
5531 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5532   if (FMF.noNaNs())
5533     return true;
5534 
5535   if (auto *C = dyn_cast<ConstantFP>(V))
5536     return !C->isNaN();
5537 
5538   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5539     if (!C->getElementType()->isFloatingPointTy())
5540       return false;
5541     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5542       if (C->getElementAsAPFloat(I).isNaN())
5543         return false;
5544     }
5545     return true;
5546   }
5547 
5548   if (isa<ConstantAggregateZero>(V))
5549     return true;
5550 
5551   return false;
5552 }
5553 
5554 static bool isKnownNonZero(const Value *V) {
5555   if (auto *C = dyn_cast<ConstantFP>(V))
5556     return !C->isZero();
5557 
5558   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5559     if (!C->getElementType()->isFloatingPointTy())
5560       return false;
5561     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5562       if (C->getElementAsAPFloat(I).isZero())
5563         return false;
5564     }
5565     return true;
5566   }
5567 
5568   return false;
5569 }
5570 
5571 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5572 /// Given non-min/max outer cmp/select from the clamp pattern this
5573 /// function recognizes if it can be substitued by a "canonical" min/max
5574 /// pattern.
5575 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5576                                                Value *CmpLHS, Value *CmpRHS,
5577                                                Value *TrueVal, Value *FalseVal,
5578                                                Value *&LHS, Value *&RHS) {
5579   // Try to match
5580   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5581   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5582   // and return description of the outer Max/Min.
5583 
5584   // First, check if select has inverse order:
5585   if (CmpRHS == FalseVal) {
5586     std::swap(TrueVal, FalseVal);
5587     Pred = CmpInst::getInversePredicate(Pred);
5588   }
5589 
5590   // Assume success now. If there's no match, callers should not use these anyway.
5591   LHS = TrueVal;
5592   RHS = FalseVal;
5593 
5594   const APFloat *FC1;
5595   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5596     return {SPF_UNKNOWN, SPNB_NA, false};
5597 
5598   const APFloat *FC2;
5599   switch (Pred) {
5600   case CmpInst::FCMP_OLT:
5601   case CmpInst::FCMP_OLE:
5602   case CmpInst::FCMP_ULT:
5603   case CmpInst::FCMP_ULE:
5604     if (match(FalseVal,
5605               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5606                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5607         *FC1 < *FC2)
5608       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5609     break;
5610   case CmpInst::FCMP_OGT:
5611   case CmpInst::FCMP_OGE:
5612   case CmpInst::FCMP_UGT:
5613   case CmpInst::FCMP_UGE:
5614     if (match(FalseVal,
5615               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5616                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5617         *FC1 > *FC2)
5618       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5619     break;
5620   default:
5621     break;
5622   }
5623 
5624   return {SPF_UNKNOWN, SPNB_NA, false};
5625 }
5626 
5627 /// Recognize variations of:
5628 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5629 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5630                                       Value *CmpLHS, Value *CmpRHS,
5631                                       Value *TrueVal, Value *FalseVal) {
5632   // Swap the select operands and predicate to match the patterns below.
5633   if (CmpRHS != TrueVal) {
5634     Pred = ICmpInst::getSwappedPredicate(Pred);
5635     std::swap(TrueVal, FalseVal);
5636   }
5637   const APInt *C1;
5638   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5639     const APInt *C2;
5640     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5641     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5642         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5643       return {SPF_SMAX, SPNB_NA, false};
5644 
5645     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5646     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5647         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5648       return {SPF_SMIN, SPNB_NA, false};
5649 
5650     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5651     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5652         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5653       return {SPF_UMAX, SPNB_NA, false};
5654 
5655     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5656     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5657         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5658       return {SPF_UMIN, SPNB_NA, false};
5659   }
5660   return {SPF_UNKNOWN, SPNB_NA, false};
5661 }
5662 
5663 /// Recognize variations of:
5664 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5665 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5666                                                Value *CmpLHS, Value *CmpRHS,
5667                                                Value *TVal, Value *FVal,
5668                                                unsigned Depth) {
5669   // TODO: Allow FP min/max with nnan/nsz.
5670   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5671 
5672   Value *A = nullptr, *B = nullptr;
5673   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5674   if (!SelectPatternResult::isMinOrMax(L.Flavor))
5675     return {SPF_UNKNOWN, SPNB_NA, false};
5676 
5677   Value *C = nullptr, *D = nullptr;
5678   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5679   if (L.Flavor != R.Flavor)
5680     return {SPF_UNKNOWN, SPNB_NA, false};
5681 
5682   // We have something like: x Pred y ? min(a, b) : min(c, d).
5683   // Try to match the compare to the min/max operations of the select operands.
5684   // First, make sure we have the right compare predicate.
5685   switch (L.Flavor) {
5686   case SPF_SMIN:
5687     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5688       Pred = ICmpInst::getSwappedPredicate(Pred);
5689       std::swap(CmpLHS, CmpRHS);
5690     }
5691     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5692       break;
5693     return {SPF_UNKNOWN, SPNB_NA, false};
5694   case SPF_SMAX:
5695     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5696       Pred = ICmpInst::getSwappedPredicate(Pred);
5697       std::swap(CmpLHS, CmpRHS);
5698     }
5699     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5700       break;
5701     return {SPF_UNKNOWN, SPNB_NA, false};
5702   case SPF_UMIN:
5703     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5704       Pred = ICmpInst::getSwappedPredicate(Pred);
5705       std::swap(CmpLHS, CmpRHS);
5706     }
5707     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5708       break;
5709     return {SPF_UNKNOWN, SPNB_NA, false};
5710   case SPF_UMAX:
5711     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5712       Pred = ICmpInst::getSwappedPredicate(Pred);
5713       std::swap(CmpLHS, CmpRHS);
5714     }
5715     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5716       break;
5717     return {SPF_UNKNOWN, SPNB_NA, false};
5718   default:
5719     return {SPF_UNKNOWN, SPNB_NA, false};
5720   }
5721 
5722   // If there is a common operand in the already matched min/max and the other
5723   // min/max operands match the compare operands (either directly or inverted),
5724   // then this is min/max of the same flavor.
5725 
5726   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5727   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5728   if (D == B) {
5729     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5730                                          match(A, m_Not(m_Specific(CmpRHS)))))
5731       return {L.Flavor, SPNB_NA, false};
5732   }
5733   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5734   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5735   if (C == B) {
5736     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5737                                          match(A, m_Not(m_Specific(CmpRHS)))))
5738       return {L.Flavor, SPNB_NA, false};
5739   }
5740   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5741   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5742   if (D == A) {
5743     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5744                                          match(B, m_Not(m_Specific(CmpRHS)))))
5745       return {L.Flavor, SPNB_NA, false};
5746   }
5747   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5748   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5749   if (C == A) {
5750     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5751                                          match(B, m_Not(m_Specific(CmpRHS)))))
5752       return {L.Flavor, SPNB_NA, false};
5753   }
5754 
5755   return {SPF_UNKNOWN, SPNB_NA, false};
5756 }
5757 
5758 /// If the input value is the result of a 'not' op, constant integer, or vector
5759 /// splat of a constant integer, return the bitwise-not source value.
5760 /// TODO: This could be extended to handle non-splat vector integer constants.
5761 static Value *getNotValue(Value *V) {
5762   Value *NotV;
5763   if (match(V, m_Not(m_Value(NotV))))
5764     return NotV;
5765 
5766   const APInt *C;
5767   if (match(V, m_APInt(C)))
5768     return ConstantInt::get(V->getType(), ~(*C));
5769 
5770   return nullptr;
5771 }
5772 
5773 /// Match non-obvious integer minimum and maximum sequences.
5774 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5775                                        Value *CmpLHS, Value *CmpRHS,
5776                                        Value *TrueVal, Value *FalseVal,
5777                                        Value *&LHS, Value *&RHS,
5778                                        unsigned Depth) {
5779   // Assume success. If there's no match, callers should not use these anyway.
5780   LHS = TrueVal;
5781   RHS = FalseVal;
5782 
5783   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5784   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5785     return SPR;
5786 
5787   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5788   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5789     return SPR;
5790 
5791   // Look through 'not' ops to find disguised min/max.
5792   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5793   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5794   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5795     switch (Pred) {
5796     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5797     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5798     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5799     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5800     default: break;
5801     }
5802   }
5803 
5804   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5805   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5806   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5807     switch (Pred) {
5808     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5809     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5810     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5811     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5812     default: break;
5813     }
5814   }
5815 
5816   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5817     return {SPF_UNKNOWN, SPNB_NA, false};
5818 
5819   // Z = X -nsw Y
5820   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5821   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5822   if (match(TrueVal, m_Zero()) &&
5823       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5824     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5825 
5826   // Z = X -nsw Y
5827   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5828   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5829   if (match(FalseVal, m_Zero()) &&
5830       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5831     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5832 
5833   const APInt *C1;
5834   if (!match(CmpRHS, m_APInt(C1)))
5835     return {SPF_UNKNOWN, SPNB_NA, false};
5836 
5837   // An unsigned min/max can be written with a signed compare.
5838   const APInt *C2;
5839   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5840       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5841     // Is the sign bit set?
5842     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5843     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5844     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5845         C2->isMaxSignedValue())
5846       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5847 
5848     // Is the sign bit clear?
5849     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5850     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5851     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5852         C2->isMinSignedValue())
5853       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5854   }
5855 
5856   return {SPF_UNKNOWN, SPNB_NA, false};
5857 }
5858 
5859 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5860   assert(X && Y && "Invalid operand");
5861 
5862   // X = sub (0, Y) || X = sub nsw (0, Y)
5863   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5864       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5865     return true;
5866 
5867   // Y = sub (0, X) || Y = sub nsw (0, X)
5868   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5869       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5870     return true;
5871 
5872   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5873   Value *A, *B;
5874   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5875                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5876          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5877                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5878 }
5879 
5880 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5881                                               FastMathFlags FMF,
5882                                               Value *CmpLHS, Value *CmpRHS,
5883                                               Value *TrueVal, Value *FalseVal,
5884                                               Value *&LHS, Value *&RHS,
5885                                               unsigned Depth) {
5886   if (CmpInst::isFPPredicate(Pred)) {
5887     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5888     // 0.0 operand, set the compare's 0.0 operands to that same value for the
5889     // purpose of identifying min/max. Disregard vector constants with undefined
5890     // elements because those can not be back-propagated for analysis.
5891     Value *OutputZeroVal = nullptr;
5892     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5893         !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
5894       OutputZeroVal = TrueVal;
5895     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5896              !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
5897       OutputZeroVal = FalseVal;
5898 
5899     if (OutputZeroVal) {
5900       if (match(CmpLHS, m_AnyZeroFP()))
5901         CmpLHS = OutputZeroVal;
5902       if (match(CmpRHS, m_AnyZeroFP()))
5903         CmpRHS = OutputZeroVal;
5904     }
5905   }
5906 
5907   LHS = CmpLHS;
5908   RHS = CmpRHS;
5909 
5910   // Signed zero may return inconsistent results between implementations.
5911   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5912   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5913   // Therefore, we behave conservatively and only proceed if at least one of the
5914   // operands is known to not be zero or if we don't care about signed zero.
5915   switch (Pred) {
5916   default: break;
5917   // FIXME: Include OGT/OLT/UGT/ULT.
5918   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5919   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5920     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5921         !isKnownNonZero(CmpRHS))
5922       return {SPF_UNKNOWN, SPNB_NA, false};
5923   }
5924 
5925   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5926   bool Ordered = false;
5927 
5928   // When given one NaN and one non-NaN input:
5929   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5930   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5931   //     ordered comparison fails), which could be NaN or non-NaN.
5932   // so here we discover exactly what NaN behavior is required/accepted.
5933   if (CmpInst::isFPPredicate(Pred)) {
5934     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5935     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5936 
5937     if (LHSSafe && RHSSafe) {
5938       // Both operands are known non-NaN.
5939       NaNBehavior = SPNB_RETURNS_ANY;
5940     } else if (CmpInst::isOrdered(Pred)) {
5941       // An ordered comparison will return false when given a NaN, so it
5942       // returns the RHS.
5943       Ordered = true;
5944       if (LHSSafe)
5945         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5946         NaNBehavior = SPNB_RETURNS_NAN;
5947       else if (RHSSafe)
5948         NaNBehavior = SPNB_RETURNS_OTHER;
5949       else
5950         // Completely unsafe.
5951         return {SPF_UNKNOWN, SPNB_NA, false};
5952     } else {
5953       Ordered = false;
5954       // An unordered comparison will return true when given a NaN, so it
5955       // returns the LHS.
5956       if (LHSSafe)
5957         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5958         NaNBehavior = SPNB_RETURNS_OTHER;
5959       else if (RHSSafe)
5960         NaNBehavior = SPNB_RETURNS_NAN;
5961       else
5962         // Completely unsafe.
5963         return {SPF_UNKNOWN, SPNB_NA, false};
5964     }
5965   }
5966 
5967   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5968     std::swap(CmpLHS, CmpRHS);
5969     Pred = CmpInst::getSwappedPredicate(Pred);
5970     if (NaNBehavior == SPNB_RETURNS_NAN)
5971       NaNBehavior = SPNB_RETURNS_OTHER;
5972     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5973       NaNBehavior = SPNB_RETURNS_NAN;
5974     Ordered = !Ordered;
5975   }
5976 
5977   // ([if]cmp X, Y) ? X : Y
5978   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5979     switch (Pred) {
5980     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5981     case ICmpInst::ICMP_UGT:
5982     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5983     case ICmpInst::ICMP_SGT:
5984     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5985     case ICmpInst::ICMP_ULT:
5986     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5987     case ICmpInst::ICMP_SLT:
5988     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5989     case FCmpInst::FCMP_UGT:
5990     case FCmpInst::FCMP_UGE:
5991     case FCmpInst::FCMP_OGT:
5992     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5993     case FCmpInst::FCMP_ULT:
5994     case FCmpInst::FCMP_ULE:
5995     case FCmpInst::FCMP_OLT:
5996     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5997     }
5998   }
5999 
6000   if (isKnownNegation(TrueVal, FalseVal)) {
6001     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
6002     // match against either LHS or sext(LHS).
6003     auto MaybeSExtCmpLHS =
6004         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
6005     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
6006     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
6007     if (match(TrueVal, MaybeSExtCmpLHS)) {
6008       // Set the return values. If the compare uses the negated value (-X >s 0),
6009       // swap the return values because the negated value is always 'RHS'.
6010       LHS = TrueVal;
6011       RHS = FalseVal;
6012       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
6013         std::swap(LHS, RHS);
6014 
6015       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
6016       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
6017       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6018         return {SPF_ABS, SPNB_NA, false};
6019 
6020       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
6021       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
6022         return {SPF_ABS, SPNB_NA, false};
6023 
6024       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
6025       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
6026       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6027         return {SPF_NABS, SPNB_NA, false};
6028     }
6029     else if (match(FalseVal, MaybeSExtCmpLHS)) {
6030       // Set the return values. If the compare uses the negated value (-X >s 0),
6031       // swap the return values because the negated value is always 'RHS'.
6032       LHS = FalseVal;
6033       RHS = TrueVal;
6034       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
6035         std::swap(LHS, RHS);
6036 
6037       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
6038       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
6039       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6040         return {SPF_NABS, SPNB_NA, false};
6041 
6042       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
6043       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
6044       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6045         return {SPF_ABS, SPNB_NA, false};
6046     }
6047   }
6048 
6049   if (CmpInst::isIntPredicate(Pred))
6050     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
6051 
6052   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
6053   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
6054   // semantics than minNum. Be conservative in such case.
6055   if (NaNBehavior != SPNB_RETURNS_ANY ||
6056       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6057        !isKnownNonZero(CmpRHS)))
6058     return {SPF_UNKNOWN, SPNB_NA, false};
6059 
6060   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
6061 }
6062 
6063 /// Helps to match a select pattern in case of a type mismatch.
6064 ///
6065 /// The function processes the case when type of true and false values of a
6066 /// select instruction differs from type of the cmp instruction operands because
6067 /// of a cast instruction. The function checks if it is legal to move the cast
6068 /// operation after "select". If yes, it returns the new second value of
6069 /// "select" (with the assumption that cast is moved):
6070 /// 1. As operand of cast instruction when both values of "select" are same cast
6071 /// instructions.
6072 /// 2. As restored constant (by applying reverse cast operation) when the first
6073 /// value of the "select" is a cast operation and the second value is a
6074 /// constant.
6075 /// NOTE: We return only the new second value because the first value could be
6076 /// accessed as operand of cast instruction.
6077 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
6078                               Instruction::CastOps *CastOp) {
6079   auto *Cast1 = dyn_cast<CastInst>(V1);
6080   if (!Cast1)
6081     return nullptr;
6082 
6083   *CastOp = Cast1->getOpcode();
6084   Type *SrcTy = Cast1->getSrcTy();
6085   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
6086     // If V1 and V2 are both the same cast from the same type, look through V1.
6087     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
6088       return Cast2->getOperand(0);
6089     return nullptr;
6090   }
6091 
6092   auto *C = dyn_cast<Constant>(V2);
6093   if (!C)
6094     return nullptr;
6095 
6096   Constant *CastedTo = nullptr;
6097   switch (*CastOp) {
6098   case Instruction::ZExt:
6099     if (CmpI->isUnsigned())
6100       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
6101     break;
6102   case Instruction::SExt:
6103     if (CmpI->isSigned())
6104       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6105     break;
6106   case Instruction::Trunc:
6107     Constant *CmpConst;
6108     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6109         CmpConst->getType() == SrcTy) {
6110       // Here we have the following case:
6111       //
6112       //   %cond = cmp iN %x, CmpConst
6113       //   %tr = trunc iN %x to iK
6114       //   %narrowsel = select i1 %cond, iK %t, iK C
6115       //
6116       // We can always move trunc after select operation:
6117       //
6118       //   %cond = cmp iN %x, CmpConst
6119       //   %widesel = select i1 %cond, iN %x, iN CmpConst
6120       //   %tr = trunc iN %widesel to iK
6121       //
6122       // Note that C could be extended in any way because we don't care about
6123       // upper bits after truncation. It can't be abs pattern, because it would
6124       // look like:
6125       //
6126       //   select i1 %cond, x, -x.
6127       //
6128       // So only min/max pattern could be matched. Such match requires widened C
6129       // == CmpConst. That is why set widened C = CmpConst, condition trunc
6130       // CmpConst == C is checked below.
6131       CastedTo = CmpConst;
6132     } else {
6133       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6134     }
6135     break;
6136   case Instruction::FPTrunc:
6137     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6138     break;
6139   case Instruction::FPExt:
6140     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6141     break;
6142   case Instruction::FPToUI:
6143     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6144     break;
6145   case Instruction::FPToSI:
6146     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6147     break;
6148   case Instruction::UIToFP:
6149     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6150     break;
6151   case Instruction::SIToFP:
6152     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6153     break;
6154   default:
6155     break;
6156   }
6157 
6158   if (!CastedTo)
6159     return nullptr;
6160 
6161   // Make sure the cast doesn't lose any information.
6162   Constant *CastedBack =
6163       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
6164   if (CastedBack != C)
6165     return nullptr;
6166 
6167   return CastedTo;
6168 }
6169 
6170 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
6171                                              Instruction::CastOps *CastOp,
6172                                              unsigned Depth) {
6173   if (Depth >= MaxAnalysisRecursionDepth)
6174     return {SPF_UNKNOWN, SPNB_NA, false};
6175 
6176   SelectInst *SI = dyn_cast<SelectInst>(V);
6177   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
6178 
6179   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
6180   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
6181 
6182   Value *TrueVal = SI->getTrueValue();
6183   Value *FalseVal = SI->getFalseValue();
6184 
6185   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
6186                                             CastOp, Depth);
6187 }
6188 
6189 SelectPatternResult llvm::matchDecomposedSelectPattern(
6190     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
6191     Instruction::CastOps *CastOp, unsigned Depth) {
6192   CmpInst::Predicate Pred = CmpI->getPredicate();
6193   Value *CmpLHS = CmpI->getOperand(0);
6194   Value *CmpRHS = CmpI->getOperand(1);
6195   FastMathFlags FMF;
6196   if (isa<FPMathOperator>(CmpI))
6197     FMF = CmpI->getFastMathFlags();
6198 
6199   // Bail out early.
6200   if (CmpI->isEquality())
6201     return {SPF_UNKNOWN, SPNB_NA, false};
6202 
6203   // Deal with type mismatches.
6204   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
6205     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
6206       // If this is a potential fmin/fmax with a cast to integer, then ignore
6207       // -0.0 because there is no corresponding integer value.
6208       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6209         FMF.setNoSignedZeros();
6210       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6211                                   cast<CastInst>(TrueVal)->getOperand(0), C,
6212                                   LHS, RHS, Depth);
6213     }
6214     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
6215       // If this is a potential fmin/fmax with a cast to integer, then ignore
6216       // -0.0 because there is no corresponding integer value.
6217       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6218         FMF.setNoSignedZeros();
6219       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6220                                   C, cast<CastInst>(FalseVal)->getOperand(0),
6221                                   LHS, RHS, Depth);
6222     }
6223   }
6224   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6225                               LHS, RHS, Depth);
6226 }
6227 
6228 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6229   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6230   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6231   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6232   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6233   if (SPF == SPF_FMINNUM)
6234     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6235   if (SPF == SPF_FMAXNUM)
6236     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6237   llvm_unreachable("unhandled!");
6238 }
6239 
6240 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6241   if (SPF == SPF_SMIN) return SPF_SMAX;
6242   if (SPF == SPF_UMIN) return SPF_UMAX;
6243   if (SPF == SPF_SMAX) return SPF_SMIN;
6244   if (SPF == SPF_UMAX) return SPF_UMIN;
6245   llvm_unreachable("unhandled!");
6246 }
6247 
6248 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6249   switch (MinMaxID) {
6250   case Intrinsic::smax: return Intrinsic::smin;
6251   case Intrinsic::smin: return Intrinsic::smax;
6252   case Intrinsic::umax: return Intrinsic::umin;
6253   case Intrinsic::umin: return Intrinsic::umax;
6254   default: llvm_unreachable("Unexpected intrinsic");
6255   }
6256 }
6257 
6258 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6259   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6260 }
6261 
6262 APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
6263   switch (SPF) {
6264   case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
6265   case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
6266   case SPF_UMAX: return APInt::getMaxValue(BitWidth);
6267   case SPF_UMIN: return APInt::getMinValue(BitWidth);
6268   default: llvm_unreachable("Unexpected flavor");
6269   }
6270 }
6271 
6272 std::pair<Intrinsic::ID, bool>
6273 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6274   // Check if VL contains select instructions that can be folded into a min/max
6275   // vector intrinsic and return the intrinsic if it is possible.
6276   // TODO: Support floating point min/max.
6277   bool AllCmpSingleUse = true;
6278   SelectPatternResult SelectPattern;
6279   SelectPattern.Flavor = SPF_UNKNOWN;
6280   if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6281         Value *LHS, *RHS;
6282         auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6283         if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6284             CurrentPattern.Flavor == SPF_FMINNUM ||
6285             CurrentPattern.Flavor == SPF_FMAXNUM ||
6286             !I->getType()->isIntOrIntVectorTy())
6287           return false;
6288         if (SelectPattern.Flavor != SPF_UNKNOWN &&
6289             SelectPattern.Flavor != CurrentPattern.Flavor)
6290           return false;
6291         SelectPattern = CurrentPattern;
6292         AllCmpSingleUse &=
6293             match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6294         return true;
6295       })) {
6296     switch (SelectPattern.Flavor) {
6297     case SPF_SMIN:
6298       return {Intrinsic::smin, AllCmpSingleUse};
6299     case SPF_UMIN:
6300       return {Intrinsic::umin, AllCmpSingleUse};
6301     case SPF_SMAX:
6302       return {Intrinsic::smax, AllCmpSingleUse};
6303     case SPF_UMAX:
6304       return {Intrinsic::umax, AllCmpSingleUse};
6305     default:
6306       llvm_unreachable("unexpected select pattern flavor");
6307     }
6308   }
6309   return {Intrinsic::not_intrinsic, false};
6310 }
6311 
6312 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6313                                  Value *&Start, Value *&Step) {
6314   // Handle the case of a simple two-predecessor recurrence PHI.
6315   // There's a lot more that could theoretically be done here, but
6316   // this is sufficient to catch some interesting cases.
6317   if (P->getNumIncomingValues() != 2)
6318     return false;
6319 
6320   for (unsigned i = 0; i != 2; ++i) {
6321     Value *L = P->getIncomingValue(i);
6322     Value *R = P->getIncomingValue(!i);
6323     Operator *LU = dyn_cast<Operator>(L);
6324     if (!LU)
6325       continue;
6326     unsigned Opcode = LU->getOpcode();
6327 
6328     switch (Opcode) {
6329     default:
6330       continue;
6331     // TODO: Expand list -- xor, div, gep, uaddo, etc..
6332     case Instruction::LShr:
6333     case Instruction::AShr:
6334     case Instruction::Shl:
6335     case Instruction::Add:
6336     case Instruction::Sub:
6337     case Instruction::And:
6338     case Instruction::Or:
6339     case Instruction::Mul: {
6340       Value *LL = LU->getOperand(0);
6341       Value *LR = LU->getOperand(1);
6342       // Find a recurrence.
6343       if (LL == P)
6344         L = LR;
6345       else if (LR == P)
6346         L = LL;
6347       else
6348         continue; // Check for recurrence with L and R flipped.
6349 
6350       break; // Match!
6351     }
6352     };
6353 
6354     // We have matched a recurrence of the form:
6355     //   %iv = [R, %entry], [%iv.next, %backedge]
6356     //   %iv.next = binop %iv, L
6357     // OR
6358     //   %iv = [R, %entry], [%iv.next, %backedge]
6359     //   %iv.next = binop L, %iv
6360     BO = cast<BinaryOperator>(LU);
6361     Start = R;
6362     Step = L;
6363     return true;
6364   }
6365   return false;
6366 }
6367 
6368 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6369                                  Value *&Start, Value *&Step) {
6370   BinaryOperator *BO = nullptr;
6371   P = dyn_cast<PHINode>(I->getOperand(0));
6372   if (!P)
6373     P = dyn_cast<PHINode>(I->getOperand(1));
6374   return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6375 }
6376 
6377 /// Return true if "icmp Pred LHS RHS" is always true.
6378 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6379                             const Value *RHS, const DataLayout &DL,
6380                             unsigned Depth) {
6381   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6382   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6383     return true;
6384 
6385   switch (Pred) {
6386   default:
6387     return false;
6388 
6389   case CmpInst::ICMP_SLE: {
6390     const APInt *C;
6391 
6392     // LHS s<= LHS +_{nsw} C   if C >= 0
6393     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6394       return !C->isNegative();
6395     return false;
6396   }
6397 
6398   case CmpInst::ICMP_ULE: {
6399     const APInt *C;
6400 
6401     // LHS u<= LHS +_{nuw} C   for any C
6402     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6403       return true;
6404 
6405     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6406     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6407                                        const Value *&X,
6408                                        const APInt *&CA, const APInt *&CB) {
6409       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6410           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6411         return true;
6412 
6413       // If X & C == 0 then (X | C) == X +_{nuw} C
6414       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6415           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6416         KnownBits Known(CA->getBitWidth());
6417         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6418                          /*CxtI*/ nullptr, /*DT*/ nullptr);
6419         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6420           return true;
6421       }
6422 
6423       return false;
6424     };
6425 
6426     const Value *X;
6427     const APInt *CLHS, *CRHS;
6428     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6429       return CLHS->ule(*CRHS);
6430 
6431     return false;
6432   }
6433   }
6434 }
6435 
6436 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6437 /// ALHS ARHS" is true.  Otherwise, return None.
6438 static Optional<bool>
6439 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6440                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
6441                       const DataLayout &DL, unsigned Depth) {
6442   switch (Pred) {
6443   default:
6444     return None;
6445 
6446   case CmpInst::ICMP_SLT:
6447   case CmpInst::ICMP_SLE:
6448     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6449         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6450       return true;
6451     return None;
6452 
6453   case CmpInst::ICMP_ULT:
6454   case CmpInst::ICMP_ULE:
6455     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6456         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6457       return true;
6458     return None;
6459   }
6460 }
6461 
6462 /// Return true if the operands of the two compares match.  IsSwappedOps is true
6463 /// when the operands match, but are swapped.
6464 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6465                           const Value *BLHS, const Value *BRHS,
6466                           bool &IsSwappedOps) {
6467 
6468   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6469   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6470   return IsMatchingOps || IsSwappedOps;
6471 }
6472 
6473 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6474 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6475 /// Otherwise, return None if we can't infer anything.
6476 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6477                                                     CmpInst::Predicate BPred,
6478                                                     bool AreSwappedOps) {
6479   // Canonicalize the predicate as if the operands were not commuted.
6480   if (AreSwappedOps)
6481     BPred = ICmpInst::getSwappedPredicate(BPred);
6482 
6483   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6484     return true;
6485   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6486     return false;
6487 
6488   return None;
6489 }
6490 
6491 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6492 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6493 /// Otherwise, return None if we can't infer anything.
6494 static Optional<bool>
6495 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6496                                  const ConstantInt *C1,
6497                                  CmpInst::Predicate BPred,
6498                                  const ConstantInt *C2) {
6499   ConstantRange DomCR =
6500       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6501   ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2->getValue());
6502   ConstantRange Intersection = DomCR.intersectWith(CR);
6503   ConstantRange Difference = DomCR.difference(CR);
6504   if (Intersection.isEmptySet())
6505     return false;
6506   if (Difference.isEmptySet())
6507     return true;
6508   return None;
6509 }
6510 
6511 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6512 /// false.  Otherwise, return None if we can't infer anything.
6513 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6514                                          CmpInst::Predicate BPred,
6515                                          const Value *BLHS, const Value *BRHS,
6516                                          const DataLayout &DL, bool LHSIsTrue,
6517                                          unsigned Depth) {
6518   Value *ALHS = LHS->getOperand(0);
6519   Value *ARHS = LHS->getOperand(1);
6520 
6521   // The rest of the logic assumes the LHS condition is true.  If that's not the
6522   // case, invert the predicate to make it so.
6523   CmpInst::Predicate APred =
6524       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6525 
6526   // Can we infer anything when the two compares have matching operands?
6527   bool AreSwappedOps;
6528   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6529     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6530             APred, BPred, AreSwappedOps))
6531       return Implication;
6532     // No amount of additional analysis will infer the second condition, so
6533     // early exit.
6534     return None;
6535   }
6536 
6537   // Can we infer anything when the LHS operands match and the RHS operands are
6538   // constants (not necessarily matching)?
6539   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6540     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6541             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6542       return Implication;
6543     // No amount of additional analysis will infer the second condition, so
6544     // early exit.
6545     return None;
6546   }
6547 
6548   if (APred == BPred)
6549     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6550   return None;
6551 }
6552 
6553 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6554 /// false.  Otherwise, return None if we can't infer anything.  We expect the
6555 /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6556 static Optional<bool>
6557 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6558                    const Value *RHSOp0, const Value *RHSOp1,
6559                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6560   // The LHS must be an 'or', 'and', or a 'select' instruction.
6561   assert((LHS->getOpcode() == Instruction::And ||
6562           LHS->getOpcode() == Instruction::Or ||
6563           LHS->getOpcode() == Instruction::Select) &&
6564          "Expected LHS to be 'and', 'or', or 'select'.");
6565 
6566   assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6567 
6568   // If the result of an 'or' is false, then we know both legs of the 'or' are
6569   // false.  Similarly, if the result of an 'and' is true, then we know both
6570   // legs of the 'and' are true.
6571   const Value *ALHS, *ARHS;
6572   if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6573       (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6574     // FIXME: Make this non-recursion.
6575     if (Optional<bool> Implication = isImpliedCondition(
6576             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6577       return Implication;
6578     if (Optional<bool> Implication = isImpliedCondition(
6579             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6580       return Implication;
6581     return None;
6582   }
6583   return None;
6584 }
6585 
6586 Optional<bool>
6587 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6588                          const Value *RHSOp0, const Value *RHSOp1,
6589                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6590   // Bail out when we hit the limit.
6591   if (Depth == MaxAnalysisRecursionDepth)
6592     return None;
6593 
6594   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6595   // example.
6596   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6597     return None;
6598 
6599   Type *OpTy = LHS->getType();
6600   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6601 
6602   // FIXME: Extending the code below to handle vectors.
6603   if (OpTy->isVectorTy())
6604     return None;
6605 
6606   assert(OpTy->isIntegerTy(1) && "implied by above");
6607 
6608   // Both LHS and RHS are icmps.
6609   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6610   if (LHSCmp)
6611     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6612                               Depth);
6613 
6614   /// The LHS should be an 'or', 'and', or a 'select' instruction.  We expect
6615   /// the RHS to be an icmp.
6616   /// FIXME: Add support for and/or/select on the RHS.
6617   if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6618     if ((LHSI->getOpcode() == Instruction::And ||
6619          LHSI->getOpcode() == Instruction::Or ||
6620          LHSI->getOpcode() == Instruction::Select))
6621       return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6622                                 Depth);
6623   }
6624   return None;
6625 }
6626 
6627 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6628                                         const DataLayout &DL, bool LHSIsTrue,
6629                                         unsigned Depth) {
6630   // LHS ==> RHS by definition
6631   if (LHS == RHS)
6632     return LHSIsTrue;
6633 
6634   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6635   if (RHSCmp)
6636     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6637                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6638                               LHSIsTrue, Depth);
6639   return None;
6640 }
6641 
6642 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6643 // condition dominating ContextI or nullptr, if no condition is found.
6644 static std::pair<Value *, bool>
6645 getDomPredecessorCondition(const Instruction *ContextI) {
6646   if (!ContextI || !ContextI->getParent())
6647     return {nullptr, false};
6648 
6649   // TODO: This is a poor/cheap way to determine dominance. Should we use a
6650   // dominator tree (eg, from a SimplifyQuery) instead?
6651   const BasicBlock *ContextBB = ContextI->getParent();
6652   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6653   if (!PredBB)
6654     return {nullptr, false};
6655 
6656   // We need a conditional branch in the predecessor.
6657   Value *PredCond;
6658   BasicBlock *TrueBB, *FalseBB;
6659   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6660     return {nullptr, false};
6661 
6662   // The branch should get simplified. Don't bother simplifying this condition.
6663   if (TrueBB == FalseBB)
6664     return {nullptr, false};
6665 
6666   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6667          "Predecessor block does not point to successor?");
6668 
6669   // Is this condition implied by the predecessor condition?
6670   return {PredCond, TrueBB == ContextBB};
6671 }
6672 
6673 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6674                                              const Instruction *ContextI,
6675                                              const DataLayout &DL) {
6676   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6677   auto PredCond = getDomPredecessorCondition(ContextI);
6678   if (PredCond.first)
6679     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6680   return None;
6681 }
6682 
6683 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6684                                              const Value *LHS, const Value *RHS,
6685                                              const Instruction *ContextI,
6686                                              const DataLayout &DL) {
6687   auto PredCond = getDomPredecessorCondition(ContextI);
6688   if (PredCond.first)
6689     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6690                               PredCond.second);
6691   return None;
6692 }
6693 
6694 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6695                               APInt &Upper, const InstrInfoQuery &IIQ) {
6696   unsigned Width = Lower.getBitWidth();
6697   const APInt *C;
6698   switch (BO.getOpcode()) {
6699   case Instruction::Add:
6700     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6701       // FIXME: If we have both nuw and nsw, we should reduce the range further.
6702       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6703         // 'add nuw x, C' produces [C, UINT_MAX].
6704         Lower = *C;
6705       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6706         if (C->isNegative()) {
6707           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6708           Lower = APInt::getSignedMinValue(Width);
6709           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6710         } else {
6711           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6712           Lower = APInt::getSignedMinValue(Width) + *C;
6713           Upper = APInt::getSignedMaxValue(Width) + 1;
6714         }
6715       }
6716     }
6717     break;
6718 
6719   case Instruction::And:
6720     if (match(BO.getOperand(1), m_APInt(C)))
6721       // 'and x, C' produces [0, C].
6722       Upper = *C + 1;
6723     break;
6724 
6725   case Instruction::Or:
6726     if (match(BO.getOperand(1), m_APInt(C)))
6727       // 'or x, C' produces [C, UINT_MAX].
6728       Lower = *C;
6729     break;
6730 
6731   case Instruction::AShr:
6732     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6733       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6734       Lower = APInt::getSignedMinValue(Width).ashr(*C);
6735       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6736     } else if (match(BO.getOperand(0), m_APInt(C))) {
6737       unsigned ShiftAmount = Width - 1;
6738       if (!C->isNullValue() && IIQ.isExact(&BO))
6739         ShiftAmount = C->countTrailingZeros();
6740       if (C->isNegative()) {
6741         // 'ashr C, x' produces [C, C >> (Width-1)]
6742         Lower = *C;
6743         Upper = C->ashr(ShiftAmount) + 1;
6744       } else {
6745         // 'ashr C, x' produces [C >> (Width-1), C]
6746         Lower = C->ashr(ShiftAmount);
6747         Upper = *C + 1;
6748       }
6749     }
6750     break;
6751 
6752   case Instruction::LShr:
6753     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6754       // 'lshr x, C' produces [0, UINT_MAX >> C].
6755       Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
6756     } else if (match(BO.getOperand(0), m_APInt(C))) {
6757       // 'lshr C, x' produces [C >> (Width-1), C].
6758       unsigned ShiftAmount = Width - 1;
6759       if (!C->isNullValue() && IIQ.isExact(&BO))
6760         ShiftAmount = C->countTrailingZeros();
6761       Lower = C->lshr(ShiftAmount);
6762       Upper = *C + 1;
6763     }
6764     break;
6765 
6766   case Instruction::Shl:
6767     if (match(BO.getOperand(0), m_APInt(C))) {
6768       if (IIQ.hasNoUnsignedWrap(&BO)) {
6769         // 'shl nuw C, x' produces [C, C << CLZ(C)]
6770         Lower = *C;
6771         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6772       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6773         if (C->isNegative()) {
6774           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6775           unsigned ShiftAmount = C->countLeadingOnes() - 1;
6776           Lower = C->shl(ShiftAmount);
6777           Upper = *C + 1;
6778         } else {
6779           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6780           unsigned ShiftAmount = C->countLeadingZeros() - 1;
6781           Lower = *C;
6782           Upper = C->shl(ShiftAmount) + 1;
6783         }
6784       }
6785     }
6786     break;
6787 
6788   case Instruction::SDiv:
6789     if (match(BO.getOperand(1), m_APInt(C))) {
6790       APInt IntMin = APInt::getSignedMinValue(Width);
6791       APInt IntMax = APInt::getSignedMaxValue(Width);
6792       if (C->isAllOnesValue()) {
6793         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6794         //    where C != -1 and C != 0 and C != 1
6795         Lower = IntMin + 1;
6796         Upper = IntMax + 1;
6797       } else if (C->countLeadingZeros() < Width - 1) {
6798         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6799         //    where C != -1 and C != 0 and C != 1
6800         Lower = IntMin.sdiv(*C);
6801         Upper = IntMax.sdiv(*C);
6802         if (Lower.sgt(Upper))
6803           std::swap(Lower, Upper);
6804         Upper = Upper + 1;
6805         assert(Upper != Lower && "Upper part of range has wrapped!");
6806       }
6807     } else if (match(BO.getOperand(0), m_APInt(C))) {
6808       if (C->isMinSignedValue()) {
6809         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6810         Lower = *C;
6811         Upper = Lower.lshr(1) + 1;
6812       } else {
6813         // 'sdiv C, x' produces [-|C|, |C|].
6814         Upper = C->abs() + 1;
6815         Lower = (-Upper) + 1;
6816       }
6817     }
6818     break;
6819 
6820   case Instruction::UDiv:
6821     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6822       // 'udiv x, C' produces [0, UINT_MAX / C].
6823       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6824     } else if (match(BO.getOperand(0), m_APInt(C))) {
6825       // 'udiv C, x' produces [0, C].
6826       Upper = *C + 1;
6827     }
6828     break;
6829 
6830   case Instruction::SRem:
6831     if (match(BO.getOperand(1), m_APInt(C))) {
6832       // 'srem x, C' produces (-|C|, |C|).
6833       Upper = C->abs();
6834       Lower = (-Upper) + 1;
6835     }
6836     break;
6837 
6838   case Instruction::URem:
6839     if (match(BO.getOperand(1), m_APInt(C)))
6840       // 'urem x, C' produces [0, C).
6841       Upper = *C;
6842     break;
6843 
6844   default:
6845     break;
6846   }
6847 }
6848 
6849 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6850                                   APInt &Upper) {
6851   unsigned Width = Lower.getBitWidth();
6852   const APInt *C;
6853   switch (II.getIntrinsicID()) {
6854   case Intrinsic::ctpop:
6855   case Intrinsic::ctlz:
6856   case Intrinsic::cttz:
6857     // Maximum of set/clear bits is the bit width.
6858     assert(Lower == 0 && "Expected lower bound to be zero");
6859     Upper = Width + 1;
6860     break;
6861   case Intrinsic::uadd_sat:
6862     // uadd.sat(x, C) produces [C, UINT_MAX].
6863     if (match(II.getOperand(0), m_APInt(C)) ||
6864         match(II.getOperand(1), m_APInt(C)))
6865       Lower = *C;
6866     break;
6867   case Intrinsic::sadd_sat:
6868     if (match(II.getOperand(0), m_APInt(C)) ||
6869         match(II.getOperand(1), m_APInt(C))) {
6870       if (C->isNegative()) {
6871         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6872         Lower = APInt::getSignedMinValue(Width);
6873         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6874       } else {
6875         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6876         Lower = APInt::getSignedMinValue(Width) + *C;
6877         Upper = APInt::getSignedMaxValue(Width) + 1;
6878       }
6879     }
6880     break;
6881   case Intrinsic::usub_sat:
6882     // usub.sat(C, x) produces [0, C].
6883     if (match(II.getOperand(0), m_APInt(C)))
6884       Upper = *C + 1;
6885     // usub.sat(x, C) produces [0, UINT_MAX - C].
6886     else if (match(II.getOperand(1), m_APInt(C)))
6887       Upper = APInt::getMaxValue(Width) - *C + 1;
6888     break;
6889   case Intrinsic::ssub_sat:
6890     if (match(II.getOperand(0), m_APInt(C))) {
6891       if (C->isNegative()) {
6892         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6893         Lower = APInt::getSignedMinValue(Width);
6894         Upper = *C - APInt::getSignedMinValue(Width) + 1;
6895       } else {
6896         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6897         Lower = *C - APInt::getSignedMaxValue(Width);
6898         Upper = APInt::getSignedMaxValue(Width) + 1;
6899       }
6900     } else if (match(II.getOperand(1), m_APInt(C))) {
6901       if (C->isNegative()) {
6902         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6903         Lower = APInt::getSignedMinValue(Width) - *C;
6904         Upper = APInt::getSignedMaxValue(Width) + 1;
6905       } else {
6906         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6907         Lower = APInt::getSignedMinValue(Width);
6908         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6909       }
6910     }
6911     break;
6912   case Intrinsic::umin:
6913   case Intrinsic::umax:
6914   case Intrinsic::smin:
6915   case Intrinsic::smax:
6916     if (!match(II.getOperand(0), m_APInt(C)) &&
6917         !match(II.getOperand(1), m_APInt(C)))
6918       break;
6919 
6920     switch (II.getIntrinsicID()) {
6921     case Intrinsic::umin:
6922       Upper = *C + 1;
6923       break;
6924     case Intrinsic::umax:
6925       Lower = *C;
6926       break;
6927     case Intrinsic::smin:
6928       Lower = APInt::getSignedMinValue(Width);
6929       Upper = *C + 1;
6930       break;
6931     case Intrinsic::smax:
6932       Lower = *C;
6933       Upper = APInt::getSignedMaxValue(Width) + 1;
6934       break;
6935     default:
6936       llvm_unreachable("Must be min/max intrinsic");
6937     }
6938     break;
6939   case Intrinsic::abs:
6940     // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6941     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6942     if (match(II.getOperand(1), m_One()))
6943       Upper = APInt::getSignedMaxValue(Width) + 1;
6944     else
6945       Upper = APInt::getSignedMinValue(Width) + 1;
6946     break;
6947   default:
6948     break;
6949   }
6950 }
6951 
6952 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6953                                       APInt &Upper, const InstrInfoQuery &IIQ) {
6954   const Value *LHS = nullptr, *RHS = nullptr;
6955   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6956   if (R.Flavor == SPF_UNKNOWN)
6957     return;
6958 
6959   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6960 
6961   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6962     // If the negation part of the abs (in RHS) has the NSW flag,
6963     // then the result of abs(X) is [0..SIGNED_MAX],
6964     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6965     Lower = APInt::getZero(BitWidth);
6966     if (match(RHS, m_Neg(m_Specific(LHS))) &&
6967         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6968       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6969     else
6970       Upper = APInt::getSignedMinValue(BitWidth) + 1;
6971     return;
6972   }
6973 
6974   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6975     // The result of -abs(X) is <= 0.
6976     Lower = APInt::getSignedMinValue(BitWidth);
6977     Upper = APInt(BitWidth, 1);
6978     return;
6979   }
6980 
6981   const APInt *C;
6982   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6983     return;
6984 
6985   switch (R.Flavor) {
6986     case SPF_UMIN:
6987       Upper = *C + 1;
6988       break;
6989     case SPF_UMAX:
6990       Lower = *C;
6991       break;
6992     case SPF_SMIN:
6993       Lower = APInt::getSignedMinValue(BitWidth);
6994       Upper = *C + 1;
6995       break;
6996     case SPF_SMAX:
6997       Lower = *C;
6998       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6999       break;
7000     default:
7001       break;
7002   }
7003 }
7004 
7005 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
7006                                          AssumptionCache *AC,
7007                                          const Instruction *CtxI,
7008                                          unsigned Depth) {
7009   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
7010 
7011   if (Depth == MaxAnalysisRecursionDepth)
7012     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
7013 
7014   const APInt *C;
7015   if (match(V, m_APInt(C)))
7016     return ConstantRange(*C);
7017 
7018   InstrInfoQuery IIQ(UseInstrInfo);
7019   unsigned BitWidth = V->getType()->getScalarSizeInBits();
7020   APInt Lower = APInt(BitWidth, 0);
7021   APInt Upper = APInt(BitWidth, 0);
7022   if (auto *BO = dyn_cast<BinaryOperator>(V))
7023     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
7024   else if (auto *II = dyn_cast<IntrinsicInst>(V))
7025     setLimitsForIntrinsic(*II, Lower, Upper);
7026   else if (auto *SI = dyn_cast<SelectInst>(V))
7027     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
7028 
7029   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
7030 
7031   if (auto *I = dyn_cast<Instruction>(V))
7032     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
7033       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
7034 
7035   if (CtxI && AC) {
7036     // Try to restrict the range based on information from assumptions.
7037     for (auto &AssumeVH : AC->assumptionsFor(V)) {
7038       if (!AssumeVH)
7039         continue;
7040       CallInst *I = cast<CallInst>(AssumeVH);
7041       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
7042              "Got assumption for the wrong function!");
7043       assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
7044              "must be an assume intrinsic");
7045 
7046       if (!isValidAssumeForContext(I, CtxI, nullptr))
7047         continue;
7048       Value *Arg = I->getArgOperand(0);
7049       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
7050       // Currently we just use information from comparisons.
7051       if (!Cmp || Cmp->getOperand(0) != V)
7052         continue;
7053       ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
7054                                                AC, I, Depth + 1);
7055       CR = CR.intersectWith(
7056           ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
7057     }
7058   }
7059 
7060   return CR;
7061 }
7062 
7063 static Optional<int64_t>
7064 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
7065   // Skip over the first indices.
7066   gep_type_iterator GTI = gep_type_begin(GEP);
7067   for (unsigned i = 1; i != Idx; ++i, ++GTI)
7068     /*skip along*/;
7069 
7070   // Compute the offset implied by the rest of the indices.
7071   int64_t Offset = 0;
7072   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
7073     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
7074     if (!OpC)
7075       return None;
7076     if (OpC->isZero())
7077       continue; // No offset.
7078 
7079     // Handle struct indices, which add their field offset to the pointer.
7080     if (StructType *STy = GTI.getStructTypeOrNull()) {
7081       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
7082       continue;
7083     }
7084 
7085     // Otherwise, we have a sequential type like an array or fixed-length
7086     // vector. Multiply the index by the ElementSize.
7087     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
7088     if (Size.isScalable())
7089       return None;
7090     Offset += Size.getFixedSize() * OpC->getSExtValue();
7091   }
7092 
7093   return Offset;
7094 }
7095 
7096 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
7097                                         const DataLayout &DL) {
7098   Ptr1 = Ptr1->stripPointerCasts();
7099   Ptr2 = Ptr2->stripPointerCasts();
7100 
7101   // Handle the trivial case first.
7102   if (Ptr1 == Ptr2) {
7103     return 0;
7104   }
7105 
7106   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
7107   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
7108 
7109   // If one pointer is a GEP see if the GEP is a constant offset from the base,
7110   // as in "P" and "gep P, 1".
7111   // Also do this iteratively to handle the the following case:
7112   //   Ptr_t1 = GEP Ptr1, c1
7113   //   Ptr_t2 = GEP Ptr_t1, c2
7114   //   Ptr2 = GEP Ptr_t2, c3
7115   // where we will return c1+c2+c3.
7116   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
7117   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
7118   // are the same, and return the difference between offsets.
7119   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
7120                                  const Value *Ptr) -> Optional<int64_t> {
7121     const GEPOperator *GEP_T = GEP;
7122     int64_t OffsetVal = 0;
7123     bool HasSameBase = false;
7124     while (GEP_T) {
7125       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
7126       if (!Offset)
7127         return None;
7128       OffsetVal += *Offset;
7129       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
7130       if (Op0 == Ptr) {
7131         HasSameBase = true;
7132         break;
7133       }
7134       GEP_T = dyn_cast<GEPOperator>(Op0);
7135     }
7136     if (!HasSameBase)
7137       return None;
7138     return OffsetVal;
7139   };
7140 
7141   if (GEP1) {
7142     auto Offset = getOffsetFromBase(GEP1, Ptr2);
7143     if (Offset)
7144       return -*Offset;
7145   }
7146   if (GEP2) {
7147     auto Offset = getOffsetFromBase(GEP2, Ptr1);
7148     if (Offset)
7149       return Offset;
7150   }
7151 
7152   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
7153   // base.  After that base, they may have some number of common (and
7154   // potentially variable) indices.  After that they handle some constant
7155   // offset, which determines their offset from each other.  At this point, we
7156   // handle no other case.
7157   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
7158     return None;
7159 
7160   // Skip any common indices and track the GEP types.
7161   unsigned Idx = 1;
7162   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7163     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7164       break;
7165 
7166   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
7167   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
7168   if (!Offset1 || !Offset2)
7169     return None;
7170   return *Offset2 - *Offset1;
7171 }
7172