1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/GuardUtils.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/Loads.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/IntrinsicsAArch64.h"
55 #include "llvm/IR/IntrinsicsX86.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/Operator.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/User.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Compiler.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/KnownBits.h"
69 #include "llvm/Support/MathExtras.h"
70 #include <algorithm>
71 #include <array>
72 #include <cassert>
73 #include <cstdint>
74 #include <iterator>
75 #include <utility>
76 
77 using namespace llvm;
78 using namespace llvm::PatternMatch;
79 
80 // Controls the number of uses of the value searched for possible
81 // dominating comparisons.
82 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83                                               cl::Hidden, cl::init(20));
84 
85 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
86 /// returns the element type's bitwidth.
87 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
88   if (unsigned BitWidth = Ty->getScalarSizeInBits())
89     return BitWidth;
90 
91   return DL.getPointerTypeSizeInBits(Ty);
92 }
93 
94 namespace {
95 
96 // Simplifying using an assume can only be done in a particular control-flow
97 // context (the context instruction provides that context). If an assume and
98 // the context instruction are not in the same block then the DT helps in
99 // figuring out if we can use it.
100 struct Query {
101   const DataLayout &DL;
102   AssumptionCache *AC;
103   const Instruction *CxtI;
104   const DominatorTree *DT;
105 
106   // Unlike the other analyses, this may be a nullptr because not all clients
107   // provide it currently.
108   OptimizationRemarkEmitter *ORE;
109 
110   /// Set of assumptions that should be excluded from further queries.
111   /// This is because of the potential for mutual recursion to cause
112   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
113   /// classic case of this is assume(x = y), which will attempt to determine
114   /// bits in x from bits in y, which will attempt to determine bits in y from
115   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
116   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
117   /// (all of which can call computeKnownBits), and so on.
118   std::array<const Value *, MaxAnalysisRecursionDepth> Excluded;
119 
120   /// If true, it is safe to use metadata during simplification.
121   InstrInfoQuery IIQ;
122 
123   unsigned NumExcluded = 0;
124 
125   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
126         const DominatorTree *DT, bool UseInstrInfo,
127         OptimizationRemarkEmitter *ORE = nullptr)
128       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
129 
130   Query(const Query &Q, const Value *NewExcl)
131       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
132         NumExcluded(Q.NumExcluded) {
133     Excluded = Q.Excluded;
134     Excluded[NumExcluded++] = NewExcl;
135     assert(NumExcluded <= Excluded.size());
136   }
137 
138   bool isExcluded(const Value *Value) const {
139     if (NumExcluded == 0)
140       return false;
141     auto End = Excluded.begin() + NumExcluded;
142     return std::find(Excluded.begin(), End, Value) != End;
143   }
144 };
145 
146 } // end anonymous namespace
147 
148 // Given the provided Value and, potentially, a context instruction, return
149 // the preferred context instruction (if any).
150 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
151   // If we've been provided with a context instruction, then use that (provided
152   // it has been inserted).
153   if (CxtI && CxtI->getParent())
154     return CxtI;
155 
156   // If the value is really an already-inserted instruction, then use that.
157   CxtI = dyn_cast<Instruction>(V);
158   if (CxtI && CxtI->getParent())
159     return CxtI;
160 
161   return nullptr;
162 }
163 
164 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
165                                    const APInt &DemandedElts,
166                                    APInt &DemandedLHS, APInt &DemandedRHS) {
167   // The length of scalable vectors is unknown at compile time, thus we
168   // cannot check their values
169   if (isa<ScalableVectorType>(Shuf->getType()))
170     return false;
171 
172   int NumElts =
173       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
174   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
175   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
176   if (DemandedElts.isNullValue())
177     return true;
178   // Simple case of a shuffle with zeroinitializer.
179   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
180     DemandedLHS.setBit(0);
181     return true;
182   }
183   for (int i = 0; i != NumMaskElts; ++i) {
184     if (!DemandedElts[i])
185       continue;
186     int M = Shuf->getMaskValue(i);
187     assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
188 
189     // For undef elements, we don't know anything about the common state of
190     // the shuffle result.
191     if (M == -1)
192       return false;
193     if (M < NumElts)
194       DemandedLHS.setBit(M % NumElts);
195     else
196       DemandedRHS.setBit(M % NumElts);
197   }
198 
199   return true;
200 }
201 
202 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
203                              KnownBits &Known, unsigned Depth, const Query &Q);
204 
205 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
206                              const Query &Q) {
207   // FIXME: We currently have no way to represent the DemandedElts of a scalable
208   // vector
209   if (isa<ScalableVectorType>(V->getType())) {
210     Known.resetAll();
211     return;
212   }
213 
214   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
215   APInt DemandedElts =
216       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
217   computeKnownBits(V, DemandedElts, Known, Depth, Q);
218 }
219 
220 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
221                             const DataLayout &DL, unsigned Depth,
222                             AssumptionCache *AC, const Instruction *CxtI,
223                             const DominatorTree *DT,
224                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
225   ::computeKnownBits(V, Known, Depth,
226                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
227 }
228 
229 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
230                             KnownBits &Known, const DataLayout &DL,
231                             unsigned Depth, AssumptionCache *AC,
232                             const Instruction *CxtI, const DominatorTree *DT,
233                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
234   ::computeKnownBits(V, DemandedElts, Known, Depth,
235                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
236 }
237 
238 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
239                                   unsigned Depth, const Query &Q);
240 
241 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
242                                   const Query &Q);
243 
244 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
245                                  unsigned Depth, AssumptionCache *AC,
246                                  const Instruction *CxtI,
247                                  const DominatorTree *DT,
248                                  OptimizationRemarkEmitter *ORE,
249                                  bool UseInstrInfo) {
250   return ::computeKnownBits(
251       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
252 }
253 
254 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
255                                  const DataLayout &DL, unsigned Depth,
256                                  AssumptionCache *AC, const Instruction *CxtI,
257                                  const DominatorTree *DT,
258                                  OptimizationRemarkEmitter *ORE,
259                                  bool UseInstrInfo) {
260   return ::computeKnownBits(
261       V, DemandedElts, Depth,
262       Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
263 }
264 
265 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
266                                const DataLayout &DL, AssumptionCache *AC,
267                                const Instruction *CxtI, const DominatorTree *DT,
268                                bool UseInstrInfo) {
269   assert(LHS->getType() == RHS->getType() &&
270          "LHS and RHS should have the same type");
271   assert(LHS->getType()->isIntOrIntVectorTy() &&
272          "LHS and RHS should be integers");
273   // Look for an inverted mask: (X & ~M) op (Y & M).
274   Value *M;
275   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
276       match(RHS, m_c_And(m_Specific(M), m_Value())))
277     return true;
278   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
279       match(LHS, m_c_And(m_Specific(M), m_Value())))
280     return true;
281   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
282   KnownBits LHSKnown(IT->getBitWidth());
283   KnownBits RHSKnown(IT->getBitWidth());
284   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
285   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
286   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
287 }
288 
289 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
290   for (const User *U : CxtI->users()) {
291     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
292       if (IC->isEquality())
293         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
294           if (C->isNullValue())
295             continue;
296     return false;
297   }
298   return true;
299 }
300 
301 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
302                                    const Query &Q);
303 
304 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
305                                   bool OrZero, unsigned Depth,
306                                   AssumptionCache *AC, const Instruction *CxtI,
307                                   const DominatorTree *DT, bool UseInstrInfo) {
308   return ::isKnownToBeAPowerOfTwo(
309       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
310 }
311 
312 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
313                            unsigned Depth, const Query &Q);
314 
315 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
316 
317 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
318                           AssumptionCache *AC, const Instruction *CxtI,
319                           const DominatorTree *DT, bool UseInstrInfo) {
320   return ::isKnownNonZero(V, Depth,
321                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
322 }
323 
324 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
325                               unsigned Depth, AssumptionCache *AC,
326                               const Instruction *CxtI, const DominatorTree *DT,
327                               bool UseInstrInfo) {
328   KnownBits Known =
329       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
330   return Known.isNonNegative();
331 }
332 
333 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
334                            AssumptionCache *AC, const Instruction *CxtI,
335                            const DominatorTree *DT, bool UseInstrInfo) {
336   if (auto *CI = dyn_cast<ConstantInt>(V))
337     return CI->getValue().isStrictlyPositive();
338 
339   // TODO: We'd doing two recursive queries here.  We should factor this such
340   // that only a single query is needed.
341   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
342          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
343 }
344 
345 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
346                            AssumptionCache *AC, const Instruction *CxtI,
347                            const DominatorTree *DT, bool UseInstrInfo) {
348   KnownBits Known =
349       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
350   return Known.isNegative();
351 }
352 
353 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
354 
355 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
356                            const DataLayout &DL, AssumptionCache *AC,
357                            const Instruction *CxtI, const DominatorTree *DT,
358                            bool UseInstrInfo) {
359   return ::isKnownNonEqual(V1, V2,
360                            Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
361                                  UseInstrInfo, /*ORE=*/nullptr));
362 }
363 
364 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
365                               const Query &Q);
366 
367 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
368                              const DataLayout &DL, unsigned Depth,
369                              AssumptionCache *AC, const Instruction *CxtI,
370                              const DominatorTree *DT, bool UseInstrInfo) {
371   return ::MaskedValueIsZero(
372       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
373 }
374 
375 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
376                                    unsigned Depth, const Query &Q);
377 
378 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
379                                    const Query &Q) {
380   // FIXME: We currently have no way to represent the DemandedElts of a scalable
381   // vector
382   if (isa<ScalableVectorType>(V->getType()))
383     return 1;
384 
385   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
386   APInt DemandedElts =
387       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
388   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
389 }
390 
391 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
392                                   unsigned Depth, AssumptionCache *AC,
393                                   const Instruction *CxtI,
394                                   const DominatorTree *DT, bool UseInstrInfo) {
395   return ::ComputeNumSignBits(
396       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
397 }
398 
399 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
400                                    bool NSW, const APInt &DemandedElts,
401                                    KnownBits &KnownOut, KnownBits &Known2,
402                                    unsigned Depth, const Query &Q) {
403   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
404 
405   // If one operand is unknown and we have no nowrap information,
406   // the result will be unknown independently of the second operand.
407   if (KnownOut.isUnknown() && !NSW)
408     return;
409 
410   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
411   KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
412 }
413 
414 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
415                                 const APInt &DemandedElts, KnownBits &Known,
416                                 KnownBits &Known2, unsigned Depth,
417                                 const Query &Q) {
418   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
419   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
420 
421   bool isKnownNegative = false;
422   bool isKnownNonNegative = false;
423   // If the multiplication is known not to overflow, compute the sign bit.
424   if (NSW) {
425     if (Op0 == Op1) {
426       // The product of a number with itself is non-negative.
427       isKnownNonNegative = true;
428     } else {
429       bool isKnownNonNegativeOp1 = Known.isNonNegative();
430       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
431       bool isKnownNegativeOp1 = Known.isNegative();
432       bool isKnownNegativeOp0 = Known2.isNegative();
433       // The product of two numbers with the same sign is non-negative.
434       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
435                            (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
436       // The product of a negative number and a non-negative number is either
437       // negative or zero.
438       if (!isKnownNonNegative)
439         isKnownNegative =
440             (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
441              Known2.isNonZero()) ||
442             (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
443     }
444   }
445 
446   Known = KnownBits::computeForMul(Known, Known2);
447 
448   // Only make use of no-wrap flags if we failed to compute the sign bit
449   // directly.  This matters if the multiplication always overflows, in
450   // which case we prefer to follow the result of the direct computation,
451   // though as the program is invoking undefined behaviour we can choose
452   // whatever we like here.
453   if (isKnownNonNegative && !Known.isNegative())
454     Known.makeNonNegative();
455   else if (isKnownNegative && !Known.isNonNegative())
456     Known.makeNegative();
457 }
458 
459 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
460                                              KnownBits &Known) {
461   unsigned BitWidth = Known.getBitWidth();
462   unsigned NumRanges = Ranges.getNumOperands() / 2;
463   assert(NumRanges >= 1);
464 
465   Known.Zero.setAllBits();
466   Known.One.setAllBits();
467 
468   for (unsigned i = 0; i < NumRanges; ++i) {
469     ConstantInt *Lower =
470         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
471     ConstantInt *Upper =
472         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
473     ConstantRange Range(Lower->getValue(), Upper->getValue());
474 
475     // The first CommonPrefixBits of all values in Range are equal.
476     unsigned CommonPrefixBits =
477         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
478     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
479     APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
480     Known.One &= UnsignedMax & Mask;
481     Known.Zero &= ~UnsignedMax & Mask;
482   }
483 }
484 
485 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
486   SmallVector<const Value *, 16> WorkSet(1, I);
487   SmallPtrSet<const Value *, 32> Visited;
488   SmallPtrSet<const Value *, 16> EphValues;
489 
490   // The instruction defining an assumption's condition itself is always
491   // considered ephemeral to that assumption (even if it has other
492   // non-ephemeral users). See r246696's test case for an example.
493   if (is_contained(I->operands(), E))
494     return true;
495 
496   while (!WorkSet.empty()) {
497     const Value *V = WorkSet.pop_back_val();
498     if (!Visited.insert(V).second)
499       continue;
500 
501     // If all uses of this value are ephemeral, then so is this value.
502     if (llvm::all_of(V->users(), [&](const User *U) {
503                                    return EphValues.count(U);
504                                  })) {
505       if (V == E)
506         return true;
507 
508       if (V == I || isSafeToSpeculativelyExecute(V)) {
509        EphValues.insert(V);
510        if (const User *U = dyn_cast<User>(V))
511          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
512               J != JE; ++J)
513            WorkSet.push_back(*J);
514       }
515     }
516   }
517 
518   return false;
519 }
520 
521 // Is this an intrinsic that cannot be speculated but also cannot trap?
522 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
523   if (const CallInst *CI = dyn_cast<CallInst>(I))
524     if (Function *F = CI->getCalledFunction())
525       switch (F->getIntrinsicID()) {
526       default: break;
527       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
528       case Intrinsic::assume:
529       case Intrinsic::sideeffect:
530       case Intrinsic::dbg_declare:
531       case Intrinsic::dbg_value:
532       case Intrinsic::dbg_label:
533       case Intrinsic::invariant_start:
534       case Intrinsic::invariant_end:
535       case Intrinsic::lifetime_start:
536       case Intrinsic::lifetime_end:
537       case Intrinsic::objectsize:
538       case Intrinsic::ptr_annotation:
539       case Intrinsic::var_annotation:
540         return true;
541       }
542 
543   return false;
544 }
545 
546 bool llvm::isValidAssumeForContext(const Instruction *Inv,
547                                    const Instruction *CxtI,
548                                    const DominatorTree *DT) {
549   // There are two restrictions on the use of an assume:
550   //  1. The assume must dominate the context (or the control flow must
551   //     reach the assume whenever it reaches the context).
552   //  2. The context must not be in the assume's set of ephemeral values
553   //     (otherwise we will use the assume to prove that the condition
554   //     feeding the assume is trivially true, thus causing the removal of
555   //     the assume).
556 
557   if (Inv->getParent() == CxtI->getParent()) {
558     // If Inv and CtxI are in the same block, check if the assume (Inv) is first
559     // in the BB.
560     if (Inv->comesBefore(CxtI))
561       return true;
562 
563     // Don't let an assume affect itself - this would cause the problems
564     // `isEphemeralValueOf` is trying to prevent, and it would also make
565     // the loop below go out of bounds.
566     if (Inv == CxtI)
567       return false;
568 
569     // The context comes first, but they're both in the same block.
570     // Make sure there is nothing in between that might interrupt
571     // the control flow, not even CxtI itself.
572     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
573       if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
574         return false;
575 
576     return !isEphemeralValueOf(Inv, CxtI);
577   }
578 
579   // Inv and CxtI are in different blocks.
580   if (DT) {
581     if (DT->dominates(Inv, CxtI))
582       return true;
583   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
584     // We don't have a DT, but this trivially dominates.
585     return true;
586   }
587 
588   return false;
589 }
590 
591 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
592   // Use of assumptions is context-sensitive. If we don't have a context, we
593   // cannot use them!
594   if (!Q.AC || !Q.CxtI)
595     return false;
596 
597   // Note that the patterns below need to be kept in sync with the code
598   // in AssumptionCache::updateAffectedValues.
599 
600   auto CmpExcludesZero = [V](ICmpInst *Cmp) {
601     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
602 
603     Value *RHS;
604     CmpInst::Predicate Pred;
605     if (!match(Cmp, m_c_ICmp(Pred, m_V, m_Value(RHS))))
606       return false;
607     // assume(v u> y) -> assume(v != 0)
608     if (Pred == ICmpInst::ICMP_UGT)
609       return true;
610 
611     // assume(v != 0)
612     // We special-case this one to ensure that we handle `assume(v != null)`.
613     if (Pred == ICmpInst::ICMP_NE)
614       return match(RHS, m_Zero());
615 
616     // All other predicates - rely on generic ConstantRange handling.
617     ConstantInt *CI;
618     if (!match(RHS, m_ConstantInt(CI)))
619       return false;
620     ConstantRange RHSRange(CI->getValue());
621     ConstantRange TrueValues =
622         ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
623     return !TrueValues.contains(APInt::getNullValue(CI->getBitWidth()));
624   };
625 
626   if (Q.CxtI && V->getType()->isPointerTy()) {
627     SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
628     if (!NullPointerIsDefined(Q.CxtI->getFunction(),
629                               V->getType()->getPointerAddressSpace()))
630       AttrKinds.push_back(Attribute::Dereferenceable);
631 
632     if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
633       return true;
634   }
635 
636   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
637     if (!AssumeVH)
638       continue;
639     CallInst *I = cast<CallInst>(AssumeVH);
640     assert(I->getFunction() == Q.CxtI->getFunction() &&
641            "Got assumption for the wrong function!");
642     if (Q.isExcluded(I))
643       continue;
644 
645     // Warning: This loop can end up being somewhat performance sensitive.
646     // We're running this loop for once for each value queried resulting in a
647     // runtime of ~O(#assumes * #values).
648 
649     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
650            "must be an assume intrinsic");
651 
652     Value *Arg = I->getArgOperand(0);
653     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
654     if (!Cmp)
655       continue;
656 
657     if (CmpExcludesZero(Cmp) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
658       return true;
659   }
660 
661   return false;
662 }
663 
664 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
665                                        unsigned Depth, const Query &Q) {
666   // Use of assumptions is context-sensitive. If we don't have a context, we
667   // cannot use them!
668   if (!Q.AC || !Q.CxtI)
669     return;
670 
671   unsigned BitWidth = Known.getBitWidth();
672 
673   // Refine Known set if the pointer alignment is set by assume bundles.
674   if (V->getType()->isPointerTy()) {
675     if (RetainedKnowledge RK = getKnowledgeValidInContext(
676             V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
677       Known.Zero.setLowBits(Log2_32(RK.ArgValue));
678     }
679   }
680 
681   // Note that the patterns below need to be kept in sync with the code
682   // in AssumptionCache::updateAffectedValues.
683 
684   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
685     if (!AssumeVH)
686       continue;
687     CallInst *I = cast<CallInst>(AssumeVH);
688     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
689            "Got assumption for the wrong function!");
690     if (Q.isExcluded(I))
691       continue;
692 
693     // Warning: This loop can end up being somewhat performance sensitive.
694     // We're running this loop for once for each value queried resulting in a
695     // runtime of ~O(#assumes * #values).
696 
697     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
698            "must be an assume intrinsic");
699 
700     Value *Arg = I->getArgOperand(0);
701 
702     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
703       assert(BitWidth == 1 && "assume operand is not i1?");
704       Known.setAllOnes();
705       return;
706     }
707     if (match(Arg, m_Not(m_Specific(V))) &&
708         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
709       assert(BitWidth == 1 && "assume operand is not i1?");
710       Known.setAllZero();
711       return;
712     }
713 
714     // The remaining tests are all recursive, so bail out if we hit the limit.
715     if (Depth == MaxAnalysisRecursionDepth)
716       continue;
717 
718     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
719     if (!Cmp)
720       continue;
721 
722     // Note that ptrtoint may change the bitwidth.
723     Value *A, *B;
724     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
725 
726     CmpInst::Predicate Pred;
727     uint64_t C;
728     switch (Cmp->getPredicate()) {
729     default:
730       break;
731     case ICmpInst::ICMP_EQ:
732       // assume(v = a)
733       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
734           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
735         KnownBits RHSKnown =
736             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
737         Known.Zero |= RHSKnown.Zero;
738         Known.One  |= RHSKnown.One;
739       // assume(v & b = a)
740       } else if (match(Cmp,
741                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
742                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
743         KnownBits RHSKnown =
744             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
745         KnownBits MaskKnown =
746             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
747 
748         // For those bits in the mask that are known to be one, we can propagate
749         // known bits from the RHS to V.
750         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
751         Known.One  |= RHSKnown.One  & MaskKnown.One;
752       // assume(~(v & b) = a)
753       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
754                                      m_Value(A))) &&
755                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
756         KnownBits RHSKnown =
757             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
758         KnownBits MaskKnown =
759             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
760 
761         // For those bits in the mask that are known to be one, we can propagate
762         // inverted known bits from the RHS to V.
763         Known.Zero |= RHSKnown.One  & MaskKnown.One;
764         Known.One  |= RHSKnown.Zero & MaskKnown.One;
765       // assume(v | b = a)
766       } else if (match(Cmp,
767                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
768                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
769         KnownBits RHSKnown =
770             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
771         KnownBits BKnown =
772             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
773 
774         // For those bits in B that are known to be zero, we can propagate known
775         // bits from the RHS to V.
776         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
777         Known.One  |= RHSKnown.One  & BKnown.Zero;
778       // assume(~(v | b) = a)
779       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
780                                      m_Value(A))) &&
781                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
782         KnownBits RHSKnown =
783             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
784         KnownBits BKnown =
785             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
786 
787         // For those bits in B that are known to be zero, we can propagate
788         // inverted known bits from the RHS to V.
789         Known.Zero |= RHSKnown.One  & BKnown.Zero;
790         Known.One  |= RHSKnown.Zero & BKnown.Zero;
791       // assume(v ^ b = a)
792       } else if (match(Cmp,
793                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
794                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
795         KnownBits RHSKnown =
796             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
797         KnownBits BKnown =
798             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
799 
800         // For those bits in B that are known to be zero, we can propagate known
801         // bits from the RHS to V. For those bits in B that are known to be one,
802         // we can propagate inverted known bits from the RHS to V.
803         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
804         Known.One  |= RHSKnown.One  & BKnown.Zero;
805         Known.Zero |= RHSKnown.One  & BKnown.One;
806         Known.One  |= RHSKnown.Zero & BKnown.One;
807       // assume(~(v ^ b) = a)
808       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
809                                      m_Value(A))) &&
810                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
811         KnownBits RHSKnown =
812             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
813         KnownBits BKnown =
814             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
815 
816         // For those bits in B that are known to be zero, we can propagate
817         // inverted known bits from the RHS to V. For those bits in B that are
818         // known to be one, we can propagate known bits from the RHS to V.
819         Known.Zero |= RHSKnown.One  & BKnown.Zero;
820         Known.One  |= RHSKnown.Zero & BKnown.Zero;
821         Known.Zero |= RHSKnown.Zero & BKnown.One;
822         Known.One  |= RHSKnown.One  & BKnown.One;
823       // assume(v << c = a)
824       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
825                                      m_Value(A))) &&
826                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
827         KnownBits RHSKnown =
828             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
829 
830         // For those bits in RHS that are known, we can propagate them to known
831         // bits in V shifted to the right by C.
832         RHSKnown.Zero.lshrInPlace(C);
833         Known.Zero |= RHSKnown.Zero;
834         RHSKnown.One.lshrInPlace(C);
835         Known.One  |= RHSKnown.One;
836       // assume(~(v << c) = a)
837       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
838                                      m_Value(A))) &&
839                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
840         KnownBits RHSKnown =
841             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
842         // For those bits in RHS that are known, we can propagate them inverted
843         // to known bits in V shifted to the right by C.
844         RHSKnown.One.lshrInPlace(C);
845         Known.Zero |= RHSKnown.One;
846         RHSKnown.Zero.lshrInPlace(C);
847         Known.One  |= RHSKnown.Zero;
848       // assume(v >> c = a)
849       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
850                                      m_Value(A))) &&
851                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
852         KnownBits RHSKnown =
853             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
854         // For those bits in RHS that are known, we can propagate them to known
855         // bits in V shifted to the right by C.
856         Known.Zero |= RHSKnown.Zero << C;
857         Known.One  |= RHSKnown.One  << C;
858       // assume(~(v >> c) = a)
859       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
860                                      m_Value(A))) &&
861                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
862         KnownBits RHSKnown =
863             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
864         // For those bits in RHS that are known, we can propagate them inverted
865         // to known bits in V shifted to the right by C.
866         Known.Zero |= RHSKnown.One  << C;
867         Known.One  |= RHSKnown.Zero << C;
868       }
869       break;
870     case ICmpInst::ICMP_SGE:
871       // assume(v >=_s c) where c is non-negative
872       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
873           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
874         KnownBits RHSKnown =
875             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
876 
877         if (RHSKnown.isNonNegative()) {
878           // We know that the sign bit is zero.
879           Known.makeNonNegative();
880         }
881       }
882       break;
883     case ICmpInst::ICMP_SGT:
884       // assume(v >_s c) where c is at least -1.
885       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
886           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
887         KnownBits RHSKnown =
888             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
889 
890         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
891           // We know that the sign bit is zero.
892           Known.makeNonNegative();
893         }
894       }
895       break;
896     case ICmpInst::ICMP_SLE:
897       // assume(v <=_s c) where c is negative
898       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
899           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
900         KnownBits RHSKnown =
901             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
902 
903         if (RHSKnown.isNegative()) {
904           // We know that the sign bit is one.
905           Known.makeNegative();
906         }
907       }
908       break;
909     case ICmpInst::ICMP_SLT:
910       // assume(v <_s c) where c is non-positive
911       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
912           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
913         KnownBits RHSKnown =
914             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
915 
916         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
917           // We know that the sign bit is one.
918           Known.makeNegative();
919         }
920       }
921       break;
922     case ICmpInst::ICMP_ULE:
923       // assume(v <=_u c)
924       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
925           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
926         KnownBits RHSKnown =
927             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
928 
929         // Whatever high bits in c are zero are known to be zero.
930         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
931       }
932       break;
933     case ICmpInst::ICMP_ULT:
934       // assume(v <_u c)
935       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
936           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
937         KnownBits RHSKnown =
938             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
939 
940         // If the RHS is known zero, then this assumption must be wrong (nothing
941         // is unsigned less than zero). Signal a conflict and get out of here.
942         if (RHSKnown.isZero()) {
943           Known.Zero.setAllBits();
944           Known.One.setAllBits();
945           break;
946         }
947 
948         // Whatever high bits in c are zero are known to be zero (if c is a power
949         // of 2, then one more).
950         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
951           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
952         else
953           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
954       }
955       break;
956     }
957   }
958 
959   // If assumptions conflict with each other or previous known bits, then we
960   // have a logical fallacy. It's possible that the assumption is not reachable,
961   // so this isn't a real bug. On the other hand, the program may have undefined
962   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
963   // clear out the known bits, try to warn the user, and hope for the best.
964   if (Known.Zero.intersects(Known.One)) {
965     Known.resetAll();
966 
967     if (Q.ORE)
968       Q.ORE->emit([&]() {
969         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
970         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
971                                           CxtI)
972                << "Detected conflicting code assumptions. Program may "
973                   "have undefined behavior, or compiler may have "
974                   "internal error.";
975       });
976   }
977 }
978 
979 /// Compute known bits from a shift operator, including those with a
980 /// non-constant shift amount. Known is the output of this function. Known2 is a
981 /// pre-allocated temporary with the same bit width as Known and on return
982 /// contains the known bit of the shift value source. KF is an
983 /// operator-specific function that, given the known-bits and a shift amount,
984 /// compute the implied known-bits of the shift operator's result respectively
985 /// for that shift amount. The results from calling KF are conservatively
986 /// combined for all permitted shift amounts.
987 static void computeKnownBitsFromShiftOperator(
988     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
989     KnownBits &Known2, unsigned Depth, const Query &Q,
990     function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
991   unsigned BitWidth = Known.getBitWidth();
992   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
993   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
994 
995   if (Known.isConstant()) {
996     Known = KF(Known2, Known);
997 
998     // If the known bits conflict, this must be an overflowing left shift, so
999     // the shift result is poison. We can return anything we want. Choose 0 for
1000     // the best folding opportunity.
1001     if (Known.hasConflict())
1002       Known.setAllZero();
1003 
1004     return;
1005   }
1006 
1007   // If the shift amount could be greater than or equal to the bit-width of the
1008   // LHS, the value could be poison, but bail out because the check below is
1009   // expensive.
1010   // TODO: Should we just carry on?
1011   if (Known.getMaxValue().uge(BitWidth)) {
1012     Known.resetAll();
1013     return;
1014   }
1015 
1016   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
1017   // BitWidth > 64 and any upper bits are known, we'll end up returning the
1018   // limit value (which implies all bits are known).
1019   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
1020   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
1021 
1022   // It would be more-clearly correct to use the two temporaries for this
1023   // calculation. Reusing the APInts here to prevent unnecessary allocations.
1024   Known.resetAll();
1025 
1026   // If we know the shifter operand is nonzero, we can sometimes infer more
1027   // known bits. However this is expensive to compute, so be lazy about it and
1028   // only compute it when absolutely necessary.
1029   Optional<bool> ShifterOperandIsNonZero;
1030 
1031   // Early exit if we can't constrain any well-defined shift amount.
1032   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1033       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1034     ShifterOperandIsNonZero =
1035         isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1036     if (!*ShifterOperandIsNonZero)
1037       return;
1038   }
1039 
1040   Known.Zero.setAllBits();
1041   Known.One.setAllBits();
1042   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1043     // Combine the shifted known input bits only for those shift amounts
1044     // compatible with its known constraints.
1045     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1046       continue;
1047     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1048       continue;
1049     // If we know the shifter is nonzero, we may be able to infer more known
1050     // bits. This check is sunk down as far as possible to avoid the expensive
1051     // call to isKnownNonZero if the cheaper checks above fail.
1052     if (ShiftAmt == 0) {
1053       if (!ShifterOperandIsNonZero.hasValue())
1054         ShifterOperandIsNonZero =
1055             isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1056       if (*ShifterOperandIsNonZero)
1057         continue;
1058     }
1059 
1060     Known = KnownBits::commonBits(
1061         Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1062   }
1063 
1064   // If the known bits conflict, the result is poison. Return a 0 and hope the
1065   // caller can further optimize that.
1066   if (Known.hasConflict())
1067     Known.setAllZero();
1068 }
1069 
1070 static void computeKnownBitsFromOperator(const Operator *I,
1071                                          const APInt &DemandedElts,
1072                                          KnownBits &Known, unsigned Depth,
1073                                          const Query &Q) {
1074   unsigned BitWidth = Known.getBitWidth();
1075 
1076   KnownBits Known2(BitWidth);
1077   switch (I->getOpcode()) {
1078   default: break;
1079   case Instruction::Load:
1080     if (MDNode *MD =
1081             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1082       computeKnownBitsFromRangeMetadata(*MD, Known);
1083     break;
1084   case Instruction::And: {
1085     // If either the LHS or the RHS are Zero, the result is zero.
1086     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1087     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1088 
1089     Known &= Known2;
1090 
1091     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1092     // here we handle the more general case of adding any odd number by
1093     // matching the form add(x, add(x, y)) where y is odd.
1094     // TODO: This could be generalized to clearing any bit set in y where the
1095     // following bit is known to be unset in y.
1096     Value *X = nullptr, *Y = nullptr;
1097     if (!Known.Zero[0] && !Known.One[0] &&
1098         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1099       Known2.resetAll();
1100       computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1101       if (Known2.countMinTrailingOnes() > 0)
1102         Known.Zero.setBit(0);
1103     }
1104     break;
1105   }
1106   case Instruction::Or:
1107     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1108     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1109 
1110     Known |= Known2;
1111     break;
1112   case Instruction::Xor:
1113     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1114     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1115 
1116     Known ^= Known2;
1117     break;
1118   case Instruction::Mul: {
1119     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1120     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1121                         Known, Known2, Depth, Q);
1122     break;
1123   }
1124   case Instruction::UDiv: {
1125     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1126     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1127     Known = KnownBits::udiv(Known, Known2);
1128     break;
1129   }
1130   case Instruction::Select: {
1131     const Value *LHS = nullptr, *RHS = nullptr;
1132     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1133     if (SelectPatternResult::isMinOrMax(SPF)) {
1134       computeKnownBits(RHS, Known, Depth + 1, Q);
1135       computeKnownBits(LHS, Known2, Depth + 1, Q);
1136       switch (SPF) {
1137       default:
1138         llvm_unreachable("Unhandled select pattern flavor!");
1139       case SPF_SMAX:
1140         Known = KnownBits::smax(Known, Known2);
1141         break;
1142       case SPF_SMIN:
1143         Known = KnownBits::smin(Known, Known2);
1144         break;
1145       case SPF_UMAX:
1146         Known = KnownBits::umax(Known, Known2);
1147         break;
1148       case SPF_UMIN:
1149         Known = KnownBits::umin(Known, Known2);
1150         break;
1151       }
1152       break;
1153     }
1154 
1155     computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1156     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1157 
1158     // Only known if known in both the LHS and RHS.
1159     Known = KnownBits::commonBits(Known, Known2);
1160 
1161     if (SPF == SPF_ABS) {
1162       // RHS from matchSelectPattern returns the negation part of abs pattern.
1163       // If the negate has an NSW flag we can assume the sign bit of the result
1164       // will be 0 because that makes abs(INT_MIN) undefined.
1165       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1166           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1167         Known.Zero.setSignBit();
1168     }
1169 
1170     break;
1171   }
1172   case Instruction::FPTrunc:
1173   case Instruction::FPExt:
1174   case Instruction::FPToUI:
1175   case Instruction::FPToSI:
1176   case Instruction::SIToFP:
1177   case Instruction::UIToFP:
1178     break; // Can't work with floating point.
1179   case Instruction::PtrToInt:
1180   case Instruction::IntToPtr:
1181     // Fall through and handle them the same as zext/trunc.
1182     LLVM_FALLTHROUGH;
1183   case Instruction::ZExt:
1184   case Instruction::Trunc: {
1185     Type *SrcTy = I->getOperand(0)->getType();
1186 
1187     unsigned SrcBitWidth;
1188     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1189     // which fall through here.
1190     Type *ScalarTy = SrcTy->getScalarType();
1191     SrcBitWidth = ScalarTy->isPointerTy() ?
1192       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1193       Q.DL.getTypeSizeInBits(ScalarTy);
1194 
1195     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1196     Known = Known.anyextOrTrunc(SrcBitWidth);
1197     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1198     Known = Known.zextOrTrunc(BitWidth);
1199     break;
1200   }
1201   case Instruction::BitCast: {
1202     Type *SrcTy = I->getOperand(0)->getType();
1203     if (SrcTy->isIntOrPtrTy() &&
1204         // TODO: For now, not handling conversions like:
1205         // (bitcast i64 %x to <2 x i32>)
1206         !I->getType()->isVectorTy()) {
1207       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1208       break;
1209     }
1210     break;
1211   }
1212   case Instruction::SExt: {
1213     // Compute the bits in the result that are not present in the input.
1214     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1215 
1216     Known = Known.trunc(SrcBitWidth);
1217     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1218     // If the sign bit of the input is known set or clear, then we know the
1219     // top bits of the result.
1220     Known = Known.sext(BitWidth);
1221     break;
1222   }
1223   case Instruction::Shl: {
1224     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1225     auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1226       KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1227       // If this shift has "nsw" keyword, then the result is either a poison
1228       // value or has the same sign bit as the first operand.
1229       if (NSW) {
1230         if (KnownVal.Zero.isSignBitSet())
1231           Result.Zero.setSignBit();
1232         if (KnownVal.One.isSignBitSet())
1233           Result.One.setSignBit();
1234       }
1235       return Result;
1236     };
1237     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1238                                       KF);
1239     break;
1240   }
1241   case Instruction::LShr: {
1242     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1243       return KnownBits::lshr(KnownVal, KnownAmt);
1244     };
1245     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1246                                       KF);
1247     break;
1248   }
1249   case Instruction::AShr: {
1250     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1251       return KnownBits::ashr(KnownVal, KnownAmt);
1252     };
1253     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1254                                       KF);
1255     break;
1256   }
1257   case Instruction::Sub: {
1258     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1259     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1260                            DemandedElts, Known, Known2, Depth, Q);
1261     break;
1262   }
1263   case Instruction::Add: {
1264     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1265     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1266                            DemandedElts, Known, Known2, Depth, Q);
1267     break;
1268   }
1269   case Instruction::SRem:
1270     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1271     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1272     Known = KnownBits::srem(Known, Known2);
1273     break;
1274 
1275   case Instruction::URem:
1276     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1277     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1278     Known = KnownBits::urem(Known, Known2);
1279     break;
1280   case Instruction::Alloca:
1281     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1282     break;
1283   case Instruction::GetElementPtr: {
1284     // Analyze all of the subscripts of this getelementptr instruction
1285     // to determine if we can prove known low zero bits.
1286     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1287     // Accumulate the constant indices in a separate variable
1288     // to minimize the number of calls to computeForAddSub.
1289     APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1290 
1291     gep_type_iterator GTI = gep_type_begin(I);
1292     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1293       // TrailZ can only become smaller, short-circuit if we hit zero.
1294       if (Known.isUnknown())
1295         break;
1296 
1297       Value *Index = I->getOperand(i);
1298 
1299       // Handle case when index is zero.
1300       Constant *CIndex = dyn_cast<Constant>(Index);
1301       if (CIndex && CIndex->isZeroValue())
1302         continue;
1303 
1304       if (StructType *STy = GTI.getStructTypeOrNull()) {
1305         // Handle struct member offset arithmetic.
1306 
1307         assert(CIndex &&
1308                "Access to structure field must be known at compile time");
1309 
1310         if (CIndex->getType()->isVectorTy())
1311           Index = CIndex->getSplatValue();
1312 
1313         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1314         const StructLayout *SL = Q.DL.getStructLayout(STy);
1315         uint64_t Offset = SL->getElementOffset(Idx);
1316         AccConstIndices += Offset;
1317         continue;
1318       }
1319 
1320       // Handle array index arithmetic.
1321       Type *IndexedTy = GTI.getIndexedType();
1322       if (!IndexedTy->isSized()) {
1323         Known.resetAll();
1324         break;
1325       }
1326 
1327       unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1328       KnownBits IndexBits(IndexBitWidth);
1329       computeKnownBits(Index, IndexBits, Depth + 1, Q);
1330       TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1331       uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1332       KnownBits ScalingFactor(IndexBitWidth);
1333       // Multiply by current sizeof type.
1334       // &A[i] == A + i * sizeof(*A[i]).
1335       if (IndexTypeSize.isScalable()) {
1336         // For scalable types the only thing we know about sizeof is
1337         // that this is a multiple of the minimum size.
1338         ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1339       } else if (IndexBits.isConstant()) {
1340         APInt IndexConst = IndexBits.getConstant();
1341         APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1342         IndexConst *= ScalingFactor;
1343         AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1344         continue;
1345       } else {
1346         ScalingFactor.Zero = ~TypeSizeInBytes;
1347         ScalingFactor.One = TypeSizeInBytes;
1348       }
1349       IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor);
1350 
1351       // If the offsets have a different width from the pointer, according
1352       // to the language reference we need to sign-extend or truncate them
1353       // to the width of the pointer.
1354       IndexBits = IndexBits.sextOrTrunc(BitWidth);
1355 
1356       // Note that inbounds does *not* guarantee nsw for the addition, as only
1357       // the offset is signed, while the base address is unsigned.
1358       Known = KnownBits::computeForAddSub(
1359           /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1360     }
1361     if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
1362       KnownBits Index(BitWidth);
1363       Index.Zero = ~AccConstIndices;
1364       Index.One = AccConstIndices;
1365       Known = KnownBits::computeForAddSub(
1366           /*Add=*/true, /*NSW=*/false, Known, Index);
1367     }
1368     break;
1369   }
1370   case Instruction::PHI: {
1371     const PHINode *P = cast<PHINode>(I);
1372     // Handle the case of a simple two-predecessor recurrence PHI.
1373     // There's a lot more that could theoretically be done here, but
1374     // this is sufficient to catch some interesting cases.
1375     if (P->getNumIncomingValues() == 2) {
1376       for (unsigned i = 0; i != 2; ++i) {
1377         Value *L = P->getIncomingValue(i);
1378         Value *R = P->getIncomingValue(!i);
1379         Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
1380         Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
1381         Operator *LU = dyn_cast<Operator>(L);
1382         if (!LU)
1383           continue;
1384         unsigned Opcode = LU->getOpcode();
1385         // Check for operations that have the property that if
1386         // both their operands have low zero bits, the result
1387         // will have low zero bits.
1388         if (Opcode == Instruction::Add ||
1389             Opcode == Instruction::Sub ||
1390             Opcode == Instruction::And ||
1391             Opcode == Instruction::Or ||
1392             Opcode == Instruction::Mul) {
1393           Value *LL = LU->getOperand(0);
1394           Value *LR = LU->getOperand(1);
1395           // Find a recurrence.
1396           if (LL == I)
1397             L = LR;
1398           else if (LR == I)
1399             L = LL;
1400           else
1401             continue; // Check for recurrence with L and R flipped.
1402 
1403           // Change the context instruction to the "edge" that flows into the
1404           // phi. This is important because that is where the value is actually
1405           // "evaluated" even though it is used later somewhere else. (see also
1406           // D69571).
1407           Query RecQ = Q;
1408 
1409           // Ok, we have a PHI of the form L op= R. Check for low
1410           // zero bits.
1411           RecQ.CxtI = RInst;
1412           computeKnownBits(R, Known2, Depth + 1, RecQ);
1413 
1414           // We need to take the minimum number of known bits
1415           KnownBits Known3(BitWidth);
1416           RecQ.CxtI = LInst;
1417           computeKnownBits(L, Known3, Depth + 1, RecQ);
1418 
1419           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1420                                          Known3.countMinTrailingZeros()));
1421 
1422           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1423           if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1424             // If initial value of recurrence is nonnegative, and we are adding
1425             // a nonnegative number with nsw, the result can only be nonnegative
1426             // or poison value regardless of the number of times we execute the
1427             // add in phi recurrence. If initial value is negative and we are
1428             // adding a negative number with nsw, the result can only be
1429             // negative or poison value. Similar arguments apply to sub and mul.
1430             //
1431             // (add non-negative, non-negative) --> non-negative
1432             // (add negative, negative) --> negative
1433             if (Opcode == Instruction::Add) {
1434               if (Known2.isNonNegative() && Known3.isNonNegative())
1435                 Known.makeNonNegative();
1436               else if (Known2.isNegative() && Known3.isNegative())
1437                 Known.makeNegative();
1438             }
1439 
1440             // (sub nsw non-negative, negative) --> non-negative
1441             // (sub nsw negative, non-negative) --> negative
1442             else if (Opcode == Instruction::Sub && LL == I) {
1443               if (Known2.isNonNegative() && Known3.isNegative())
1444                 Known.makeNonNegative();
1445               else if (Known2.isNegative() && Known3.isNonNegative())
1446                 Known.makeNegative();
1447             }
1448 
1449             // (mul nsw non-negative, non-negative) --> non-negative
1450             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1451                      Known3.isNonNegative())
1452               Known.makeNonNegative();
1453           }
1454 
1455           break;
1456         }
1457       }
1458     }
1459 
1460     // Unreachable blocks may have zero-operand PHI nodes.
1461     if (P->getNumIncomingValues() == 0)
1462       break;
1463 
1464     // Otherwise take the unions of the known bit sets of the operands,
1465     // taking conservative care to avoid excessive recursion.
1466     if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1467       // Skip if every incoming value references to ourself.
1468       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1469         break;
1470 
1471       Known.Zero.setAllBits();
1472       Known.One.setAllBits();
1473       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1474         Value *IncValue = P->getIncomingValue(u);
1475         // Skip direct self references.
1476         if (IncValue == P) continue;
1477 
1478         // Change the context instruction to the "edge" that flows into the
1479         // phi. This is important because that is where the value is actually
1480         // "evaluated" even though it is used later somewhere else. (see also
1481         // D69571).
1482         Query RecQ = Q;
1483         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1484 
1485         Known2 = KnownBits(BitWidth);
1486         // Recurse, but cap the recursion to one level, because we don't
1487         // want to waste time spinning around in loops.
1488         computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1489         Known = KnownBits::commonBits(Known, Known2);
1490         // If all bits have been ruled out, there's no need to check
1491         // more operands.
1492         if (Known.isUnknown())
1493           break;
1494       }
1495     }
1496     break;
1497   }
1498   case Instruction::Call:
1499   case Instruction::Invoke:
1500     // If range metadata is attached to this call, set known bits from that,
1501     // and then intersect with known bits based on other properties of the
1502     // function.
1503     if (MDNode *MD =
1504             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1505       computeKnownBitsFromRangeMetadata(*MD, Known);
1506     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1507       computeKnownBits(RV, Known2, Depth + 1, Q);
1508       Known.Zero |= Known2.Zero;
1509       Known.One |= Known2.One;
1510     }
1511     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1512       switch (II->getIntrinsicID()) {
1513       default: break;
1514       case Intrinsic::abs:
1515         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1516 
1517         // If the source's MSB is zero then we know the rest of the bits.
1518         if (Known2.isNonNegative()) {
1519           Known.Zero |= Known2.Zero;
1520           Known.One |= Known2.One;
1521           break;
1522         }
1523 
1524         // Absolute value preserves trailing zero count.
1525         Known.Zero.setLowBits(Known2.Zero.countTrailingOnes());
1526 
1527         // If this call is undefined for INT_MIN, the result is positive. We
1528         // also know it can't be INT_MIN if there is a set bit that isn't the
1529         // sign bit.
1530         Known2.One.clearSignBit();
1531         if (match(II->getArgOperand(1), m_One()) || Known2.One.getBoolValue())
1532           Known.Zero.setSignBit();
1533         // FIXME: Handle known negative input?
1534         // FIXME: Calculate the negated Known bits and combine them?
1535         break;
1536       case Intrinsic::bitreverse:
1537         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1538         Known.Zero |= Known2.Zero.reverseBits();
1539         Known.One |= Known2.One.reverseBits();
1540         break;
1541       case Intrinsic::bswap:
1542         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1543         Known.Zero |= Known2.Zero.byteSwap();
1544         Known.One |= Known2.One.byteSwap();
1545         break;
1546       case Intrinsic::ctlz: {
1547         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1548         // If we have a known 1, its position is our upper bound.
1549         unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1550         // If this call is undefined for 0, the result will be less than 2^n.
1551         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1552           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1553         unsigned LowBits = Log2_32(PossibleLZ)+1;
1554         Known.Zero.setBitsFrom(LowBits);
1555         break;
1556       }
1557       case Intrinsic::cttz: {
1558         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1559         // If we have a known 1, its position is our upper bound.
1560         unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1561         // If this call is undefined for 0, the result will be less than 2^n.
1562         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1563           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1564         unsigned LowBits = Log2_32(PossibleTZ)+1;
1565         Known.Zero.setBitsFrom(LowBits);
1566         break;
1567       }
1568       case Intrinsic::ctpop: {
1569         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1570         // We can bound the space the count needs.  Also, bits known to be zero
1571         // can't contribute to the population.
1572         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1573         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1574         Known.Zero.setBitsFrom(LowBits);
1575         // TODO: we could bound KnownOne using the lower bound on the number
1576         // of bits which might be set provided by popcnt KnownOne2.
1577         break;
1578       }
1579       case Intrinsic::fshr:
1580       case Intrinsic::fshl: {
1581         const APInt *SA;
1582         if (!match(I->getOperand(2), m_APInt(SA)))
1583           break;
1584 
1585         // Normalize to funnel shift left.
1586         uint64_t ShiftAmt = SA->urem(BitWidth);
1587         if (II->getIntrinsicID() == Intrinsic::fshr)
1588           ShiftAmt = BitWidth - ShiftAmt;
1589 
1590         KnownBits Known3(BitWidth);
1591         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1592         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1593 
1594         Known.Zero =
1595             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1596         Known.One =
1597             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1598         break;
1599       }
1600       case Intrinsic::uadd_sat:
1601       case Intrinsic::usub_sat: {
1602         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1603         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1604         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1605 
1606         // Add: Leading ones of either operand are preserved.
1607         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1608         // as leading zeros in the result.
1609         unsigned LeadingKnown;
1610         if (IsAdd)
1611           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1612                                   Known2.countMinLeadingOnes());
1613         else
1614           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1615                                   Known2.countMinLeadingOnes());
1616 
1617         Known = KnownBits::computeForAddSub(
1618             IsAdd, /* NSW */ false, Known, Known2);
1619 
1620         // We select between the operation result and all-ones/zero
1621         // respectively, so we can preserve known ones/zeros.
1622         if (IsAdd) {
1623           Known.One.setHighBits(LeadingKnown);
1624           Known.Zero.clearAllBits();
1625         } else {
1626           Known.Zero.setHighBits(LeadingKnown);
1627           Known.One.clearAllBits();
1628         }
1629         break;
1630       }
1631       case Intrinsic::umin:
1632         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1633         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1634         Known = KnownBits::umin(Known, Known2);
1635         break;
1636       case Intrinsic::umax:
1637         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1638         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1639         Known = KnownBits::umax(Known, Known2);
1640         break;
1641       case Intrinsic::smin:
1642         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1643         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1644         Known = KnownBits::smin(Known, Known2);
1645         break;
1646       case Intrinsic::smax:
1647         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1648         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1649         Known = KnownBits::smax(Known, Known2);
1650         break;
1651       case Intrinsic::x86_sse42_crc32_64_64:
1652         Known.Zero.setBitsFrom(32);
1653         break;
1654       }
1655     }
1656     break;
1657   case Instruction::ShuffleVector: {
1658     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1659     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1660     if (!Shuf) {
1661       Known.resetAll();
1662       return;
1663     }
1664     // For undef elements, we don't know anything about the common state of
1665     // the shuffle result.
1666     APInt DemandedLHS, DemandedRHS;
1667     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1668       Known.resetAll();
1669       return;
1670     }
1671     Known.One.setAllBits();
1672     Known.Zero.setAllBits();
1673     if (!!DemandedLHS) {
1674       const Value *LHS = Shuf->getOperand(0);
1675       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1676       // If we don't know any bits, early out.
1677       if (Known.isUnknown())
1678         break;
1679     }
1680     if (!!DemandedRHS) {
1681       const Value *RHS = Shuf->getOperand(1);
1682       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1683       Known = KnownBits::commonBits(Known, Known2);
1684     }
1685     break;
1686   }
1687   case Instruction::InsertElement: {
1688     const Value *Vec = I->getOperand(0);
1689     const Value *Elt = I->getOperand(1);
1690     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1691     // Early out if the index is non-constant or out-of-range.
1692     unsigned NumElts = DemandedElts.getBitWidth();
1693     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1694       Known.resetAll();
1695       return;
1696     }
1697     Known.One.setAllBits();
1698     Known.Zero.setAllBits();
1699     unsigned EltIdx = CIdx->getZExtValue();
1700     // Do we demand the inserted element?
1701     if (DemandedElts[EltIdx]) {
1702       computeKnownBits(Elt, Known, Depth + 1, Q);
1703       // If we don't know any bits, early out.
1704       if (Known.isUnknown())
1705         break;
1706     }
1707     // We don't need the base vector element that has been inserted.
1708     APInt DemandedVecElts = DemandedElts;
1709     DemandedVecElts.clearBit(EltIdx);
1710     if (!!DemandedVecElts) {
1711       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1712       Known = KnownBits::commonBits(Known, Known2);
1713     }
1714     break;
1715   }
1716   case Instruction::ExtractElement: {
1717     // Look through extract element. If the index is non-constant or
1718     // out-of-range demand all elements, otherwise just the extracted element.
1719     const Value *Vec = I->getOperand(0);
1720     const Value *Idx = I->getOperand(1);
1721     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1722     if (isa<ScalableVectorType>(Vec->getType())) {
1723       // FIXME: there's probably *something* we can do with scalable vectors
1724       Known.resetAll();
1725       break;
1726     }
1727     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1728     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1729     if (CIdx && CIdx->getValue().ult(NumElts))
1730       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1731     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1732     break;
1733   }
1734   case Instruction::ExtractValue:
1735     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1736       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1737       if (EVI->getNumIndices() != 1) break;
1738       if (EVI->getIndices()[0] == 0) {
1739         switch (II->getIntrinsicID()) {
1740         default: break;
1741         case Intrinsic::uadd_with_overflow:
1742         case Intrinsic::sadd_with_overflow:
1743           computeKnownBitsAddSub(true, II->getArgOperand(0),
1744                                  II->getArgOperand(1), false, DemandedElts,
1745                                  Known, Known2, Depth, Q);
1746           break;
1747         case Intrinsic::usub_with_overflow:
1748         case Intrinsic::ssub_with_overflow:
1749           computeKnownBitsAddSub(false, II->getArgOperand(0),
1750                                  II->getArgOperand(1), false, DemandedElts,
1751                                  Known, Known2, Depth, Q);
1752           break;
1753         case Intrinsic::umul_with_overflow:
1754         case Intrinsic::smul_with_overflow:
1755           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1756                               DemandedElts, Known, Known2, Depth, Q);
1757           break;
1758         }
1759       }
1760     }
1761     break;
1762   case Instruction::Freeze:
1763     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1764                                   Depth + 1))
1765       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1766     break;
1767   }
1768 }
1769 
1770 /// Determine which bits of V are known to be either zero or one and return
1771 /// them.
1772 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1773                            unsigned Depth, const Query &Q) {
1774   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1775   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1776   return Known;
1777 }
1778 
1779 /// Determine which bits of V are known to be either zero or one and return
1780 /// them.
1781 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1782   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1783   computeKnownBits(V, Known, Depth, Q);
1784   return Known;
1785 }
1786 
1787 /// Determine which bits of V are known to be either zero or one and return
1788 /// them in the Known bit set.
1789 ///
1790 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1791 /// we cannot optimize based on the assumption that it is zero without changing
1792 /// it to be an explicit zero.  If we don't change it to zero, other code could
1793 /// optimized based on the contradictory assumption that it is non-zero.
1794 /// Because instcombine aggressively folds operations with undef args anyway,
1795 /// this won't lose us code quality.
1796 ///
1797 /// This function is defined on values with integer type, values with pointer
1798 /// type, and vectors of integers.  In the case
1799 /// where V is a vector, known zero, and known one values are the
1800 /// same width as the vector element, and the bit is set only if it is true
1801 /// for all of the demanded elements in the vector specified by DemandedElts.
1802 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1803                       KnownBits &Known, unsigned Depth, const Query &Q) {
1804   if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1805     // No demanded elts or V is a scalable vector, better to assume we don't
1806     // know anything.
1807     Known.resetAll();
1808     return;
1809   }
1810 
1811   assert(V && "No Value?");
1812   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1813 
1814 #ifndef NDEBUG
1815   Type *Ty = V->getType();
1816   unsigned BitWidth = Known.getBitWidth();
1817 
1818   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1819          "Not integer or pointer type!");
1820 
1821   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1822     assert(
1823         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1824         "DemandedElt width should equal the fixed vector number of elements");
1825   } else {
1826     assert(DemandedElts == APInt(1, 1) &&
1827            "DemandedElt width should be 1 for scalars");
1828   }
1829 
1830   Type *ScalarTy = Ty->getScalarType();
1831   if (ScalarTy->isPointerTy()) {
1832     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1833            "V and Known should have same BitWidth");
1834   } else {
1835     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1836            "V and Known should have same BitWidth");
1837   }
1838 #endif
1839 
1840   const APInt *C;
1841   if (match(V, m_APInt(C))) {
1842     // We know all of the bits for a scalar constant or a splat vector constant!
1843     Known.One = *C;
1844     Known.Zero = ~Known.One;
1845     return;
1846   }
1847   // Null and aggregate-zero are all-zeros.
1848   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1849     Known.setAllZero();
1850     return;
1851   }
1852   // Handle a constant vector by taking the intersection of the known bits of
1853   // each element.
1854   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1855     // We know that CDV must be a vector of integers. Take the intersection of
1856     // each element.
1857     Known.Zero.setAllBits(); Known.One.setAllBits();
1858     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1859       if (!DemandedElts[i])
1860         continue;
1861       APInt Elt = CDV->getElementAsAPInt(i);
1862       Known.Zero &= ~Elt;
1863       Known.One &= Elt;
1864     }
1865     return;
1866   }
1867 
1868   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1869     // We know that CV must be a vector of integers. Take the intersection of
1870     // each element.
1871     Known.Zero.setAllBits(); Known.One.setAllBits();
1872     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1873       if (!DemandedElts[i])
1874         continue;
1875       Constant *Element = CV->getAggregateElement(i);
1876       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1877       if (!ElementCI) {
1878         Known.resetAll();
1879         return;
1880       }
1881       const APInt &Elt = ElementCI->getValue();
1882       Known.Zero &= ~Elt;
1883       Known.One &= Elt;
1884     }
1885     return;
1886   }
1887 
1888   // Start out not knowing anything.
1889   Known.resetAll();
1890 
1891   // We can't imply anything about undefs.
1892   if (isa<UndefValue>(V))
1893     return;
1894 
1895   // There's no point in looking through other users of ConstantData for
1896   // assumptions.  Confirm that we've handled them all.
1897   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1898 
1899   // All recursive calls that increase depth must come after this.
1900   if (Depth == MaxAnalysisRecursionDepth)
1901     return;
1902 
1903   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1904   // the bits of its aliasee.
1905   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1906     if (!GA->isInterposable())
1907       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1908     return;
1909   }
1910 
1911   if (const Operator *I = dyn_cast<Operator>(V))
1912     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1913 
1914   // Aligned pointers have trailing zeros - refine Known.Zero set
1915   if (isa<PointerType>(V->getType())) {
1916     Align Alignment = V->getPointerAlignment(Q.DL);
1917     Known.Zero.setLowBits(Log2(Alignment));
1918   }
1919 
1920   // computeKnownBitsFromAssume strictly refines Known.
1921   // Therefore, we run them after computeKnownBitsFromOperator.
1922 
1923   // Check whether a nearby assume intrinsic can determine some known bits.
1924   computeKnownBitsFromAssume(V, Known, Depth, Q);
1925 
1926   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1927 }
1928 
1929 /// Return true if the given value is known to have exactly one
1930 /// bit set when defined. For vectors return true if every element is known to
1931 /// be a power of two when defined. Supports values with integer or pointer
1932 /// types and vectors of integers.
1933 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1934                             const Query &Q) {
1935   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1936 
1937   // Attempt to match against constants.
1938   if (OrZero && match(V, m_Power2OrZero()))
1939       return true;
1940   if (match(V, m_Power2()))
1941       return true;
1942 
1943   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1944   // it is shifted off the end then the result is undefined.
1945   if (match(V, m_Shl(m_One(), m_Value())))
1946     return true;
1947 
1948   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1949   // the bottom.  If it is shifted off the bottom then the result is undefined.
1950   if (match(V, m_LShr(m_SignMask(), m_Value())))
1951     return true;
1952 
1953   // The remaining tests are all recursive, so bail out if we hit the limit.
1954   if (Depth++ == MaxAnalysisRecursionDepth)
1955     return false;
1956 
1957   Value *X = nullptr, *Y = nullptr;
1958   // A shift left or a logical shift right of a power of two is a power of two
1959   // or zero.
1960   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1961                  match(V, m_LShr(m_Value(X), m_Value()))))
1962     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1963 
1964   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1965     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1966 
1967   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1968     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1969            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1970 
1971   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1972     // A power of two and'd with anything is a power of two or zero.
1973     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1974         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1975       return true;
1976     // X & (-X) is always a power of two or zero.
1977     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1978       return true;
1979     return false;
1980   }
1981 
1982   // Adding a power-of-two or zero to the same power-of-two or zero yields
1983   // either the original power-of-two, a larger power-of-two or zero.
1984   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1985     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1986     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1987         Q.IIQ.hasNoSignedWrap(VOBO)) {
1988       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1989           match(X, m_And(m_Value(), m_Specific(Y))))
1990         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1991           return true;
1992       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1993           match(Y, m_And(m_Value(), m_Specific(X))))
1994         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1995           return true;
1996 
1997       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1998       KnownBits LHSBits(BitWidth);
1999       computeKnownBits(X, LHSBits, Depth, Q);
2000 
2001       KnownBits RHSBits(BitWidth);
2002       computeKnownBits(Y, RHSBits, Depth, Q);
2003       // If i8 V is a power of two or zero:
2004       //  ZeroBits: 1 1 1 0 1 1 1 1
2005       // ~ZeroBits: 0 0 0 1 0 0 0 0
2006       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2007         // If OrZero isn't set, we cannot give back a zero result.
2008         // Make sure either the LHS or RHS has a bit set.
2009         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2010           return true;
2011     }
2012   }
2013 
2014   // An exact divide or right shift can only shift off zero bits, so the result
2015   // is a power of two only if the first operand is a power of two and not
2016   // copying a sign bit (sdiv int_min, 2).
2017   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2018       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2019     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2020                                   Depth, Q);
2021   }
2022 
2023   return false;
2024 }
2025 
2026 /// Test whether a GEP's result is known to be non-null.
2027 ///
2028 /// Uses properties inherent in a GEP to try to determine whether it is known
2029 /// to be non-null.
2030 ///
2031 /// Currently this routine does not support vector GEPs.
2032 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2033                               const Query &Q) {
2034   const Function *F = nullptr;
2035   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2036     F = I->getFunction();
2037 
2038   if (!GEP->isInBounds() ||
2039       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2040     return false;
2041 
2042   // FIXME: Support vector-GEPs.
2043   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2044 
2045   // If the base pointer is non-null, we cannot walk to a null address with an
2046   // inbounds GEP in address space zero.
2047   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2048     return true;
2049 
2050   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2051   // If so, then the GEP cannot produce a null pointer, as doing so would
2052   // inherently violate the inbounds contract within address space zero.
2053   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2054        GTI != GTE; ++GTI) {
2055     // Struct types are easy -- they must always be indexed by a constant.
2056     if (StructType *STy = GTI.getStructTypeOrNull()) {
2057       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2058       unsigned ElementIdx = OpC->getZExtValue();
2059       const StructLayout *SL = Q.DL.getStructLayout(STy);
2060       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2061       if (ElementOffset > 0)
2062         return true;
2063       continue;
2064     }
2065 
2066     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2067     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2068       continue;
2069 
2070     // Fast path the constant operand case both for efficiency and so we don't
2071     // increment Depth when just zipping down an all-constant GEP.
2072     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2073       if (!OpC->isZero())
2074         return true;
2075       continue;
2076     }
2077 
2078     // We post-increment Depth here because while isKnownNonZero increments it
2079     // as well, when we pop back up that increment won't persist. We don't want
2080     // to recurse 10k times just because we have 10k GEP operands. We don't
2081     // bail completely out because we want to handle constant GEPs regardless
2082     // of depth.
2083     if (Depth++ >= MaxAnalysisRecursionDepth)
2084       continue;
2085 
2086     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2087       return true;
2088   }
2089 
2090   return false;
2091 }
2092 
2093 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2094                                                   const Instruction *CtxI,
2095                                                   const DominatorTree *DT) {
2096   if (isa<Constant>(V))
2097     return false;
2098 
2099   if (!CtxI || !DT)
2100     return false;
2101 
2102   unsigned NumUsesExplored = 0;
2103   for (auto *U : V->users()) {
2104     // Avoid massive lists
2105     if (NumUsesExplored >= DomConditionsMaxUses)
2106       break;
2107     NumUsesExplored++;
2108 
2109     // If the value is used as an argument to a call or invoke, then argument
2110     // attributes may provide an answer about null-ness.
2111     if (const auto *CB = dyn_cast<CallBase>(U))
2112       if (auto *CalledFunc = CB->getCalledFunction())
2113         for (const Argument &Arg : CalledFunc->args())
2114           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2115               Arg.hasNonNullAttr() && DT->dominates(CB, CtxI))
2116             return true;
2117 
2118     // If the value is used as a load/store, then the pointer must be non null.
2119     if (V == getLoadStorePointerOperand(U)) {
2120       const Instruction *I = cast<Instruction>(U);
2121       if (!NullPointerIsDefined(I->getFunction(),
2122                                 V->getType()->getPointerAddressSpace()) &&
2123           DT->dominates(I, CtxI))
2124         return true;
2125     }
2126 
2127     // Consider only compare instructions uniquely controlling a branch
2128     CmpInst::Predicate Pred;
2129     if (!match(const_cast<User *>(U),
2130                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
2131         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
2132       continue;
2133 
2134     SmallVector<const User *, 4> WorkList;
2135     SmallPtrSet<const User *, 4> Visited;
2136     for (auto *CmpU : U->users()) {
2137       assert(WorkList.empty() && "Should be!");
2138       if (Visited.insert(CmpU).second)
2139         WorkList.push_back(CmpU);
2140 
2141       while (!WorkList.empty()) {
2142         auto *Curr = WorkList.pop_back_val();
2143 
2144         // If a user is an AND, add all its users to the work list. We only
2145         // propagate "pred != null" condition through AND because it is only
2146         // correct to assume that all conditions of AND are met in true branch.
2147         // TODO: Support similar logic of OR and EQ predicate?
2148         if (Pred == ICmpInst::ICMP_NE)
2149           if (auto *BO = dyn_cast<BinaryOperator>(Curr))
2150             if (BO->getOpcode() == Instruction::And) {
2151               for (auto *BOU : BO->users())
2152                 if (Visited.insert(BOU).second)
2153                   WorkList.push_back(BOU);
2154               continue;
2155             }
2156 
2157         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2158           assert(BI->isConditional() && "uses a comparison!");
2159 
2160           BasicBlock *NonNullSuccessor =
2161               BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
2162           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2163           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2164             return true;
2165         } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
2166                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2167           return true;
2168         }
2169       }
2170     }
2171   }
2172 
2173   return false;
2174 }
2175 
2176 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2177 /// ensure that the value it's attached to is never Value?  'RangeType' is
2178 /// is the type of the value described by the range.
2179 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2180   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2181   assert(NumRanges >= 1);
2182   for (unsigned i = 0; i < NumRanges; ++i) {
2183     ConstantInt *Lower =
2184         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2185     ConstantInt *Upper =
2186         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2187     ConstantRange Range(Lower->getValue(), Upper->getValue());
2188     if (Range.contains(Value))
2189       return false;
2190   }
2191   return true;
2192 }
2193 
2194 /// Return true if the given value is known to be non-zero when defined. For
2195 /// vectors, return true if every demanded element is known to be non-zero when
2196 /// defined. For pointers, if the context instruction and dominator tree are
2197 /// specified, perform context-sensitive analysis and return true if the
2198 /// pointer couldn't possibly be null at the specified instruction.
2199 /// Supports values with integer or pointer type and vectors of integers.
2200 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2201                     const Query &Q) {
2202   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2203   // vector
2204   if (isa<ScalableVectorType>(V->getType()))
2205     return false;
2206 
2207   if (auto *C = dyn_cast<Constant>(V)) {
2208     if (C->isNullValue())
2209       return false;
2210     if (isa<ConstantInt>(C))
2211       // Must be non-zero due to null test above.
2212       return true;
2213 
2214     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2215       // See the comment for IntToPtr/PtrToInt instructions below.
2216       if (CE->getOpcode() == Instruction::IntToPtr ||
2217           CE->getOpcode() == Instruction::PtrToInt)
2218         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2219                 .getFixedSize() <=
2220             Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2221           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2222     }
2223 
2224     // For constant vectors, check that all elements are undefined or known
2225     // non-zero to determine that the whole vector is known non-zero.
2226     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2227       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2228         if (!DemandedElts[i])
2229           continue;
2230         Constant *Elt = C->getAggregateElement(i);
2231         if (!Elt || Elt->isNullValue())
2232           return false;
2233         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2234           return false;
2235       }
2236       return true;
2237     }
2238 
2239     // A global variable in address space 0 is non null unless extern weak
2240     // or an absolute symbol reference. Other address spaces may have null as a
2241     // valid address for a global, so we can't assume anything.
2242     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2243       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2244           GV->getType()->getAddressSpace() == 0)
2245         return true;
2246     } else
2247       return false;
2248   }
2249 
2250   if (auto *I = dyn_cast<Instruction>(V)) {
2251     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2252       // If the possible ranges don't contain zero, then the value is
2253       // definitely non-zero.
2254       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2255         const APInt ZeroValue(Ty->getBitWidth(), 0);
2256         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2257           return true;
2258       }
2259     }
2260   }
2261 
2262   if (isKnownNonZeroFromAssume(V, Q))
2263     return true;
2264 
2265   // Some of the tests below are recursive, so bail out if we hit the limit.
2266   if (Depth++ >= MaxAnalysisRecursionDepth)
2267     return false;
2268 
2269   // Check for pointer simplifications.
2270 
2271   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2272     // Alloca never returns null, malloc might.
2273     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2274       return true;
2275 
2276     // A byval, inalloca may not be null in a non-default addres space. A
2277     // nonnull argument is assumed never 0.
2278     if (const Argument *A = dyn_cast<Argument>(V)) {
2279       if (((A->hasPassPointeeByValueCopyAttr() &&
2280             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2281            A->hasNonNullAttr()))
2282         return true;
2283     }
2284 
2285     // A Load tagged with nonnull metadata is never null.
2286     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2287       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2288         return true;
2289 
2290     if (const auto *Call = dyn_cast<CallBase>(V)) {
2291       if (Call->isReturnNonNull())
2292         return true;
2293       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2294         return isKnownNonZero(RP, Depth, Q);
2295     }
2296   }
2297 
2298   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2299     return true;
2300 
2301   // Check for recursive pointer simplifications.
2302   if (V->getType()->isPointerTy()) {
2303     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2304     // do not alter the value, or at least not the nullness property of the
2305     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2306     //
2307     // Note that we have to take special care to avoid looking through
2308     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2309     // as casts that can alter the value, e.g., AddrSpaceCasts.
2310     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2311       return isGEPKnownNonNull(GEP, Depth, Q);
2312 
2313     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2314       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2315 
2316     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2317       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2318           Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2319         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2320   }
2321 
2322   // Similar to int2ptr above, we can look through ptr2int here if the cast
2323   // is a no-op or an extend and not a truncate.
2324   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2325     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2326         Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2327       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2328 
2329   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2330 
2331   // X | Y != 0 if X != 0 or Y != 0.
2332   Value *X = nullptr, *Y = nullptr;
2333   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2334     return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2335            isKnownNonZero(Y, DemandedElts, Depth, Q);
2336 
2337   // ext X != 0 if X != 0.
2338   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2339     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2340 
2341   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2342   // if the lowest bit is shifted off the end.
2343   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2344     // shl nuw can't remove any non-zero bits.
2345     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2346     if (Q.IIQ.hasNoUnsignedWrap(BO))
2347       return isKnownNonZero(X, Depth, Q);
2348 
2349     KnownBits Known(BitWidth);
2350     computeKnownBits(X, DemandedElts, Known, Depth, Q);
2351     if (Known.One[0])
2352       return true;
2353   }
2354   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2355   // defined if the sign bit is shifted off the end.
2356   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2357     // shr exact can only shift out zero bits.
2358     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2359     if (BO->isExact())
2360       return isKnownNonZero(X, Depth, Q);
2361 
2362     KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2363     if (Known.isNegative())
2364       return true;
2365 
2366     // If the shifter operand is a constant, and all of the bits shifted
2367     // out are known to be zero, and X is known non-zero then at least one
2368     // non-zero bit must remain.
2369     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2370       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2371       // Is there a known one in the portion not shifted out?
2372       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2373         return true;
2374       // Are all the bits to be shifted out known zero?
2375       if (Known.countMinTrailingZeros() >= ShiftVal)
2376         return isKnownNonZero(X, DemandedElts, Depth, Q);
2377     }
2378   }
2379   // div exact can only produce a zero if the dividend is zero.
2380   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2381     return isKnownNonZero(X, DemandedElts, Depth, Q);
2382   }
2383   // X + Y.
2384   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2385     KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2386     KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2387 
2388     // If X and Y are both non-negative (as signed values) then their sum is not
2389     // zero unless both X and Y are zero.
2390     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2391       if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2392           isKnownNonZero(Y, DemandedElts, Depth, Q))
2393         return true;
2394 
2395     // If X and Y are both negative (as signed values) then their sum is not
2396     // zero unless both X and Y equal INT_MIN.
2397     if (XKnown.isNegative() && YKnown.isNegative()) {
2398       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2399       // The sign bit of X is set.  If some other bit is set then X is not equal
2400       // to INT_MIN.
2401       if (XKnown.One.intersects(Mask))
2402         return true;
2403       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2404       // to INT_MIN.
2405       if (YKnown.One.intersects(Mask))
2406         return true;
2407     }
2408 
2409     // The sum of a non-negative number and a power of two is not zero.
2410     if (XKnown.isNonNegative() &&
2411         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2412       return true;
2413     if (YKnown.isNonNegative() &&
2414         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2415       return true;
2416   }
2417   // X * Y.
2418   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2419     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2420     // If X and Y are non-zero then so is X * Y as long as the multiplication
2421     // does not overflow.
2422     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2423         isKnownNonZero(X, DemandedElts, Depth, Q) &&
2424         isKnownNonZero(Y, DemandedElts, Depth, Q))
2425       return true;
2426   }
2427   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2428   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2429     if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2430         isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2431       return true;
2432   }
2433   // PHI
2434   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2435     // Try and detect a recurrence that monotonically increases from a
2436     // starting value, as these are common as induction variables.
2437     if (PN->getNumIncomingValues() == 2) {
2438       Value *Start = PN->getIncomingValue(0);
2439       Value *Induction = PN->getIncomingValue(1);
2440       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2441         std::swap(Start, Induction);
2442       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2443         if (!C->isZero() && !C->isNegative()) {
2444           ConstantInt *X;
2445           if (Q.IIQ.UseInstrInfo &&
2446               (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2447                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2448               !X->isNegative())
2449             return true;
2450         }
2451       }
2452     }
2453     // Check if all incoming values are non-zero using recursion.
2454     Query RecQ = Q;
2455     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2456     return llvm::all_of(PN->operands(), [&](const Use &U) {
2457       if (U.get() == PN)
2458         return true;
2459       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2460       return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2461     });
2462   }
2463   // ExtractElement
2464   else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2465     const Value *Vec = EEI->getVectorOperand();
2466     const Value *Idx = EEI->getIndexOperand();
2467     auto *CIdx = dyn_cast<ConstantInt>(Idx);
2468     if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2469       unsigned NumElts = VecTy->getNumElements();
2470       APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2471       if (CIdx && CIdx->getValue().ult(NumElts))
2472         DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2473       return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2474     }
2475   }
2476   // Freeze
2477   else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2478     auto *Op = FI->getOperand(0);
2479     if (isKnownNonZero(Op, Depth, Q) &&
2480         isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2481       return true;
2482   }
2483 
2484   KnownBits Known(BitWidth);
2485   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2486   return Known.One != 0;
2487 }
2488 
2489 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2490   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2491   // vector
2492   if (isa<ScalableVectorType>(V->getType()))
2493     return false;
2494 
2495   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2496   APInt DemandedElts =
2497       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2498   return isKnownNonZero(V, DemandedElts, Depth, Q);
2499 }
2500 
2501 /// Return true if V2 == V1 + X, where X is known non-zero.
2502 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2503   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2504   if (!BO || BO->getOpcode() != Instruction::Add)
2505     return false;
2506   Value *Op = nullptr;
2507   if (V2 == BO->getOperand(0))
2508     Op = BO->getOperand(1);
2509   else if (V2 == BO->getOperand(1))
2510     Op = BO->getOperand(0);
2511   else
2512     return false;
2513   return isKnownNonZero(Op, 0, Q);
2514 }
2515 
2516 /// Return true if it is known that V1 != V2.
2517 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2518   if (V1 == V2)
2519     return false;
2520   if (V1->getType() != V2->getType())
2521     // We can't look through casts yet.
2522     return false;
2523   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2524     return true;
2525 
2526   if (V1->getType()->isIntOrIntVectorTy()) {
2527     // Are any known bits in V1 contradictory to known bits in V2? If V1
2528     // has a known zero where V2 has a known one, they must not be equal.
2529     KnownBits Known1 = computeKnownBits(V1, 0, Q);
2530     KnownBits Known2 = computeKnownBits(V2, 0, Q);
2531 
2532     if (Known1.Zero.intersects(Known2.One) ||
2533         Known2.Zero.intersects(Known1.One))
2534       return true;
2535   }
2536   return false;
2537 }
2538 
2539 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2540 /// simplify operations downstream. Mask is known to be zero for bits that V
2541 /// cannot have.
2542 ///
2543 /// This function is defined on values with integer type, values with pointer
2544 /// type, and vectors of integers.  In the case
2545 /// where V is a vector, the mask, known zero, and known one values are the
2546 /// same width as the vector element, and the bit is set only if it is true
2547 /// for all of the elements in the vector.
2548 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2549                        const Query &Q) {
2550   KnownBits Known(Mask.getBitWidth());
2551   computeKnownBits(V, Known, Depth, Q);
2552   return Mask.isSubsetOf(Known.Zero);
2553 }
2554 
2555 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2556 // Returns the input and lower/upper bounds.
2557 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2558                                 const APInt *&CLow, const APInt *&CHigh) {
2559   assert(isa<Operator>(Select) &&
2560          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2561          "Input should be a Select!");
2562 
2563   const Value *LHS = nullptr, *RHS = nullptr;
2564   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2565   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2566     return false;
2567 
2568   if (!match(RHS, m_APInt(CLow)))
2569     return false;
2570 
2571   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2572   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2573   if (getInverseMinMaxFlavor(SPF) != SPF2)
2574     return false;
2575 
2576   if (!match(RHS2, m_APInt(CHigh)))
2577     return false;
2578 
2579   if (SPF == SPF_SMIN)
2580     std::swap(CLow, CHigh);
2581 
2582   In = LHS2;
2583   return CLow->sle(*CHigh);
2584 }
2585 
2586 /// For vector constants, loop over the elements and find the constant with the
2587 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2588 /// or if any element was not analyzed; otherwise, return the count for the
2589 /// element with the minimum number of sign bits.
2590 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2591                                                  const APInt &DemandedElts,
2592                                                  unsigned TyBits) {
2593   const auto *CV = dyn_cast<Constant>(V);
2594   if (!CV || !isa<FixedVectorType>(CV->getType()))
2595     return 0;
2596 
2597   unsigned MinSignBits = TyBits;
2598   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2599   for (unsigned i = 0; i != NumElts; ++i) {
2600     if (!DemandedElts[i])
2601       continue;
2602     // If we find a non-ConstantInt, bail out.
2603     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2604     if (!Elt)
2605       return 0;
2606 
2607     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2608   }
2609 
2610   return MinSignBits;
2611 }
2612 
2613 static unsigned ComputeNumSignBitsImpl(const Value *V,
2614                                        const APInt &DemandedElts,
2615                                        unsigned Depth, const Query &Q);
2616 
2617 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2618                                    unsigned Depth, const Query &Q) {
2619   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2620   assert(Result > 0 && "At least one sign bit needs to be present!");
2621   return Result;
2622 }
2623 
2624 /// Return the number of times the sign bit of the register is replicated into
2625 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2626 /// (itself), but other cases can give us information. For example, immediately
2627 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2628 /// other, so we return 3. For vectors, return the number of sign bits for the
2629 /// vector element with the minimum number of known sign bits of the demanded
2630 /// elements in the vector specified by DemandedElts.
2631 static unsigned ComputeNumSignBitsImpl(const Value *V,
2632                                        const APInt &DemandedElts,
2633                                        unsigned Depth, const Query &Q) {
2634   Type *Ty = V->getType();
2635 
2636   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2637   // vector
2638   if (isa<ScalableVectorType>(Ty))
2639     return 1;
2640 
2641 #ifndef NDEBUG
2642   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2643 
2644   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2645     assert(
2646         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2647         "DemandedElt width should equal the fixed vector number of elements");
2648   } else {
2649     assert(DemandedElts == APInt(1, 1) &&
2650            "DemandedElt width should be 1 for scalars");
2651   }
2652 #endif
2653 
2654   // We return the minimum number of sign bits that are guaranteed to be present
2655   // in V, so for undef we have to conservatively return 1.  We don't have the
2656   // same behavior for poison though -- that's a FIXME today.
2657 
2658   Type *ScalarTy = Ty->getScalarType();
2659   unsigned TyBits = ScalarTy->isPointerTy() ?
2660     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2661     Q.DL.getTypeSizeInBits(ScalarTy);
2662 
2663   unsigned Tmp, Tmp2;
2664   unsigned FirstAnswer = 1;
2665 
2666   // Note that ConstantInt is handled by the general computeKnownBits case
2667   // below.
2668 
2669   if (Depth == MaxAnalysisRecursionDepth)
2670     return 1;
2671 
2672   if (auto *U = dyn_cast<Operator>(V)) {
2673     switch (Operator::getOpcode(V)) {
2674     default: break;
2675     case Instruction::SExt:
2676       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2677       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2678 
2679     case Instruction::SDiv: {
2680       const APInt *Denominator;
2681       // sdiv X, C -> adds log(C) sign bits.
2682       if (match(U->getOperand(1), m_APInt(Denominator))) {
2683 
2684         // Ignore non-positive denominator.
2685         if (!Denominator->isStrictlyPositive())
2686           break;
2687 
2688         // Calculate the incoming numerator bits.
2689         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2690 
2691         // Add floor(log(C)) bits to the numerator bits.
2692         return std::min(TyBits, NumBits + Denominator->logBase2());
2693       }
2694       break;
2695     }
2696 
2697     case Instruction::SRem: {
2698       const APInt *Denominator;
2699       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2700       // positive constant.  This let us put a lower bound on the number of sign
2701       // bits.
2702       if (match(U->getOperand(1), m_APInt(Denominator))) {
2703 
2704         // Ignore non-positive denominator.
2705         if (!Denominator->isStrictlyPositive())
2706           break;
2707 
2708         // Calculate the incoming numerator bits. SRem by a positive constant
2709         // can't lower the number of sign bits.
2710         unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2711 
2712         // Calculate the leading sign bit constraints by examining the
2713         // denominator.  Given that the denominator is positive, there are two
2714         // cases:
2715         //
2716         //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
2717         //     (1 << ceilLogBase2(C)).
2718         //
2719         //  2. the numerator is negative. Then the result range is (-C,0] and
2720         //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2721         //
2722         // Thus a lower bound on the number of sign bits is `TyBits -
2723         // ceilLogBase2(C)`.
2724 
2725         unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2726         return std::max(NumrBits, ResBits);
2727       }
2728       break;
2729     }
2730 
2731     case Instruction::AShr: {
2732       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2733       // ashr X, C   -> adds C sign bits.  Vectors too.
2734       const APInt *ShAmt;
2735       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2736         if (ShAmt->uge(TyBits))
2737           break; // Bad shift.
2738         unsigned ShAmtLimited = ShAmt->getZExtValue();
2739         Tmp += ShAmtLimited;
2740         if (Tmp > TyBits) Tmp = TyBits;
2741       }
2742       return Tmp;
2743     }
2744     case Instruction::Shl: {
2745       const APInt *ShAmt;
2746       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2747         // shl destroys sign bits.
2748         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2749         if (ShAmt->uge(TyBits) ||   // Bad shift.
2750             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2751         Tmp2 = ShAmt->getZExtValue();
2752         return Tmp - Tmp2;
2753       }
2754       break;
2755     }
2756     case Instruction::And:
2757     case Instruction::Or:
2758     case Instruction::Xor: // NOT is handled here.
2759       // Logical binary ops preserve the number of sign bits at the worst.
2760       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2761       if (Tmp != 1) {
2762         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2763         FirstAnswer = std::min(Tmp, Tmp2);
2764         // We computed what we know about the sign bits as our first
2765         // answer. Now proceed to the generic code that uses
2766         // computeKnownBits, and pick whichever answer is better.
2767       }
2768       break;
2769 
2770     case Instruction::Select: {
2771       // If we have a clamp pattern, we know that the number of sign bits will
2772       // be the minimum of the clamp min/max range.
2773       const Value *X;
2774       const APInt *CLow, *CHigh;
2775       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2776         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2777 
2778       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2779       if (Tmp == 1) break;
2780       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2781       return std::min(Tmp, Tmp2);
2782     }
2783 
2784     case Instruction::Add:
2785       // Add can have at most one carry bit.  Thus we know that the output
2786       // is, at worst, one more bit than the inputs.
2787       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2788       if (Tmp == 1) break;
2789 
2790       // Special case decrementing a value (ADD X, -1):
2791       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2792         if (CRHS->isAllOnesValue()) {
2793           KnownBits Known(TyBits);
2794           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2795 
2796           // If the input is known to be 0 or 1, the output is 0/-1, which is
2797           // all sign bits set.
2798           if ((Known.Zero | 1).isAllOnesValue())
2799             return TyBits;
2800 
2801           // If we are subtracting one from a positive number, there is no carry
2802           // out of the result.
2803           if (Known.isNonNegative())
2804             return Tmp;
2805         }
2806 
2807       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2808       if (Tmp2 == 1) break;
2809       return std::min(Tmp, Tmp2) - 1;
2810 
2811     case Instruction::Sub:
2812       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2813       if (Tmp2 == 1) break;
2814 
2815       // Handle NEG.
2816       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2817         if (CLHS->isNullValue()) {
2818           KnownBits Known(TyBits);
2819           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2820           // If the input is known to be 0 or 1, the output is 0/-1, which is
2821           // all sign bits set.
2822           if ((Known.Zero | 1).isAllOnesValue())
2823             return TyBits;
2824 
2825           // If the input is known to be positive (the sign bit is known clear),
2826           // the output of the NEG has the same number of sign bits as the
2827           // input.
2828           if (Known.isNonNegative())
2829             return Tmp2;
2830 
2831           // Otherwise, we treat this like a SUB.
2832         }
2833 
2834       // Sub can have at most one carry bit.  Thus we know that the output
2835       // is, at worst, one more bit than the inputs.
2836       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2837       if (Tmp == 1) break;
2838       return std::min(Tmp, Tmp2) - 1;
2839 
2840     case Instruction::Mul: {
2841       // The output of the Mul can be at most twice the valid bits in the
2842       // inputs.
2843       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2844       if (SignBitsOp0 == 1) break;
2845       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2846       if (SignBitsOp1 == 1) break;
2847       unsigned OutValidBits =
2848           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2849       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2850     }
2851 
2852     case Instruction::PHI: {
2853       const PHINode *PN = cast<PHINode>(U);
2854       unsigned NumIncomingValues = PN->getNumIncomingValues();
2855       // Don't analyze large in-degree PHIs.
2856       if (NumIncomingValues > 4) break;
2857       // Unreachable blocks may have zero-operand PHI nodes.
2858       if (NumIncomingValues == 0) break;
2859 
2860       // Take the minimum of all incoming values.  This can't infinitely loop
2861       // because of our depth threshold.
2862       Query RecQ = Q;
2863       Tmp = TyBits;
2864       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
2865         if (Tmp == 1) return Tmp;
2866         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
2867         Tmp = std::min(
2868             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
2869       }
2870       return Tmp;
2871     }
2872 
2873     case Instruction::Trunc:
2874       // FIXME: it's tricky to do anything useful for this, but it is an
2875       // important case for targets like X86.
2876       break;
2877 
2878     case Instruction::ExtractElement:
2879       // Look through extract element. At the moment we keep this simple and
2880       // skip tracking the specific element. But at least we might find
2881       // information valid for all elements of the vector (for example if vector
2882       // is sign extended, shifted, etc).
2883       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2884 
2885     case Instruction::ShuffleVector: {
2886       // Collect the minimum number of sign bits that are shared by every vector
2887       // element referenced by the shuffle.
2888       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
2889       if (!Shuf) {
2890         // FIXME: Add support for shufflevector constant expressions.
2891         return 1;
2892       }
2893       APInt DemandedLHS, DemandedRHS;
2894       // For undef elements, we don't know anything about the common state of
2895       // the shuffle result.
2896       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
2897         return 1;
2898       Tmp = std::numeric_limits<unsigned>::max();
2899       if (!!DemandedLHS) {
2900         const Value *LHS = Shuf->getOperand(0);
2901         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
2902       }
2903       // If we don't know anything, early out and try computeKnownBits
2904       // fall-back.
2905       if (Tmp == 1)
2906         break;
2907       if (!!DemandedRHS) {
2908         const Value *RHS = Shuf->getOperand(1);
2909         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
2910         Tmp = std::min(Tmp, Tmp2);
2911       }
2912       // If we don't know anything, early out and try computeKnownBits
2913       // fall-back.
2914       if (Tmp == 1)
2915         break;
2916       assert(Tmp <= Ty->getScalarSizeInBits() &&
2917              "Failed to determine minimum sign bits");
2918       return Tmp;
2919     }
2920     case Instruction::Call: {
2921       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
2922         switch (II->getIntrinsicID()) {
2923         default: break;
2924         case Intrinsic::abs:
2925           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2926           if (Tmp == 1) break;
2927 
2928           // Absolute value reduces number of sign bits by at most 1.
2929           return Tmp - 1;
2930         }
2931       }
2932     }
2933     }
2934   }
2935 
2936   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2937   // use this information.
2938 
2939   // If we can examine all elements of a vector constant successfully, we're
2940   // done (we can't do any better than that). If not, keep trying.
2941   if (unsigned VecSignBits =
2942           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
2943     return VecSignBits;
2944 
2945   KnownBits Known(TyBits);
2946   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2947 
2948   // If we know that the sign bit is either zero or one, determine the number of
2949   // identical bits in the top of the input value.
2950   return std::max(FirstAnswer, Known.countMinSignBits());
2951 }
2952 
2953 /// This function computes the integer multiple of Base that equals V.
2954 /// If successful, it returns true and returns the multiple in
2955 /// Multiple. If unsuccessful, it returns false. It looks
2956 /// through SExt instructions only if LookThroughSExt is true.
2957 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2958                            bool LookThroughSExt, unsigned Depth) {
2959   assert(V && "No Value?");
2960   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2961   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2962 
2963   Type *T = V->getType();
2964 
2965   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2966 
2967   if (Base == 0)
2968     return false;
2969 
2970   if (Base == 1) {
2971     Multiple = V;
2972     return true;
2973   }
2974 
2975   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2976   Constant *BaseVal = ConstantInt::get(T, Base);
2977   if (CO && CO == BaseVal) {
2978     // Multiple is 1.
2979     Multiple = ConstantInt::get(T, 1);
2980     return true;
2981   }
2982 
2983   if (CI && CI->getZExtValue() % Base == 0) {
2984     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2985     return true;
2986   }
2987 
2988   if (Depth == MaxAnalysisRecursionDepth) return false;
2989 
2990   Operator *I = dyn_cast<Operator>(V);
2991   if (!I) return false;
2992 
2993   switch (I->getOpcode()) {
2994   default: break;
2995   case Instruction::SExt:
2996     if (!LookThroughSExt) return false;
2997     // otherwise fall through to ZExt
2998     LLVM_FALLTHROUGH;
2999   case Instruction::ZExt:
3000     return ComputeMultiple(I->getOperand(0), Base, Multiple,
3001                            LookThroughSExt, Depth+1);
3002   case Instruction::Shl:
3003   case Instruction::Mul: {
3004     Value *Op0 = I->getOperand(0);
3005     Value *Op1 = I->getOperand(1);
3006 
3007     if (I->getOpcode() == Instruction::Shl) {
3008       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3009       if (!Op1CI) return false;
3010       // Turn Op0 << Op1 into Op0 * 2^Op1
3011       APInt Op1Int = Op1CI->getValue();
3012       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3013       APInt API(Op1Int.getBitWidth(), 0);
3014       API.setBit(BitToSet);
3015       Op1 = ConstantInt::get(V->getContext(), API);
3016     }
3017 
3018     Value *Mul0 = nullptr;
3019     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3020       if (Constant *Op1C = dyn_cast<Constant>(Op1))
3021         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3022           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3023               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3024             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3025           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3026               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3027             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3028 
3029           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3030           Multiple = ConstantExpr::getMul(MulC, Op1C);
3031           return true;
3032         }
3033 
3034       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3035         if (Mul0CI->getValue() == 1) {
3036           // V == Base * Op1, so return Op1
3037           Multiple = Op1;
3038           return true;
3039         }
3040     }
3041 
3042     Value *Mul1 = nullptr;
3043     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3044       if (Constant *Op0C = dyn_cast<Constant>(Op0))
3045         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3046           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3047               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3048             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3049           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3050               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3051             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3052 
3053           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3054           Multiple = ConstantExpr::getMul(MulC, Op0C);
3055           return true;
3056         }
3057 
3058       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3059         if (Mul1CI->getValue() == 1) {
3060           // V == Base * Op0, so return Op0
3061           Multiple = Op0;
3062           return true;
3063         }
3064     }
3065   }
3066   }
3067 
3068   // We could not determine if V is a multiple of Base.
3069   return false;
3070 }
3071 
3072 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3073                                             const TargetLibraryInfo *TLI) {
3074   const Function *F = CB.getCalledFunction();
3075   if (!F)
3076     return Intrinsic::not_intrinsic;
3077 
3078   if (F->isIntrinsic())
3079     return F->getIntrinsicID();
3080 
3081   // We are going to infer semantics of a library function based on mapping it
3082   // to an LLVM intrinsic. Check that the library function is available from
3083   // this callbase and in this environment.
3084   LibFunc Func;
3085   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3086       !CB.onlyReadsMemory())
3087     return Intrinsic::not_intrinsic;
3088 
3089   switch (Func) {
3090   default:
3091     break;
3092   case LibFunc_sin:
3093   case LibFunc_sinf:
3094   case LibFunc_sinl:
3095     return Intrinsic::sin;
3096   case LibFunc_cos:
3097   case LibFunc_cosf:
3098   case LibFunc_cosl:
3099     return Intrinsic::cos;
3100   case LibFunc_exp:
3101   case LibFunc_expf:
3102   case LibFunc_expl:
3103     return Intrinsic::exp;
3104   case LibFunc_exp2:
3105   case LibFunc_exp2f:
3106   case LibFunc_exp2l:
3107     return Intrinsic::exp2;
3108   case LibFunc_log:
3109   case LibFunc_logf:
3110   case LibFunc_logl:
3111     return Intrinsic::log;
3112   case LibFunc_log10:
3113   case LibFunc_log10f:
3114   case LibFunc_log10l:
3115     return Intrinsic::log10;
3116   case LibFunc_log2:
3117   case LibFunc_log2f:
3118   case LibFunc_log2l:
3119     return Intrinsic::log2;
3120   case LibFunc_fabs:
3121   case LibFunc_fabsf:
3122   case LibFunc_fabsl:
3123     return Intrinsic::fabs;
3124   case LibFunc_fmin:
3125   case LibFunc_fminf:
3126   case LibFunc_fminl:
3127     return Intrinsic::minnum;
3128   case LibFunc_fmax:
3129   case LibFunc_fmaxf:
3130   case LibFunc_fmaxl:
3131     return Intrinsic::maxnum;
3132   case LibFunc_copysign:
3133   case LibFunc_copysignf:
3134   case LibFunc_copysignl:
3135     return Intrinsic::copysign;
3136   case LibFunc_floor:
3137   case LibFunc_floorf:
3138   case LibFunc_floorl:
3139     return Intrinsic::floor;
3140   case LibFunc_ceil:
3141   case LibFunc_ceilf:
3142   case LibFunc_ceill:
3143     return Intrinsic::ceil;
3144   case LibFunc_trunc:
3145   case LibFunc_truncf:
3146   case LibFunc_truncl:
3147     return Intrinsic::trunc;
3148   case LibFunc_rint:
3149   case LibFunc_rintf:
3150   case LibFunc_rintl:
3151     return Intrinsic::rint;
3152   case LibFunc_nearbyint:
3153   case LibFunc_nearbyintf:
3154   case LibFunc_nearbyintl:
3155     return Intrinsic::nearbyint;
3156   case LibFunc_round:
3157   case LibFunc_roundf:
3158   case LibFunc_roundl:
3159     return Intrinsic::round;
3160   case LibFunc_roundeven:
3161   case LibFunc_roundevenf:
3162   case LibFunc_roundevenl:
3163     return Intrinsic::roundeven;
3164   case LibFunc_pow:
3165   case LibFunc_powf:
3166   case LibFunc_powl:
3167     return Intrinsic::pow;
3168   case LibFunc_sqrt:
3169   case LibFunc_sqrtf:
3170   case LibFunc_sqrtl:
3171     return Intrinsic::sqrt;
3172   }
3173 
3174   return Intrinsic::not_intrinsic;
3175 }
3176 
3177 /// Return true if we can prove that the specified FP value is never equal to
3178 /// -0.0.
3179 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3180 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3181 ///       the same as +0.0 in floating-point ops.
3182 ///
3183 /// NOTE: this function will need to be revisited when we support non-default
3184 /// rounding modes!
3185 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3186                                 unsigned Depth) {
3187   if (auto *CFP = dyn_cast<ConstantFP>(V))
3188     return !CFP->getValueAPF().isNegZero();
3189 
3190   if (Depth == MaxAnalysisRecursionDepth)
3191     return false;
3192 
3193   auto *Op = dyn_cast<Operator>(V);
3194   if (!Op)
3195     return false;
3196 
3197   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3198   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3199     return true;
3200 
3201   // sitofp and uitofp turn into +0.0 for zero.
3202   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3203     return true;
3204 
3205   if (auto *Call = dyn_cast<CallInst>(Op)) {
3206     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3207     switch (IID) {
3208     default:
3209       break;
3210     // sqrt(-0.0) = -0.0, no other negative results are possible.
3211     case Intrinsic::sqrt:
3212     case Intrinsic::canonicalize:
3213       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3214     // fabs(x) != -0.0
3215     case Intrinsic::fabs:
3216       return true;
3217     }
3218   }
3219 
3220   return false;
3221 }
3222 
3223 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3224 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3225 /// bit despite comparing equal.
3226 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3227                                             const TargetLibraryInfo *TLI,
3228                                             bool SignBitOnly,
3229                                             unsigned Depth) {
3230   // TODO: This function does not do the right thing when SignBitOnly is true
3231   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3232   // which flips the sign bits of NaNs.  See
3233   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3234 
3235   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3236     return !CFP->getValueAPF().isNegative() ||
3237            (!SignBitOnly && CFP->getValueAPF().isZero());
3238   }
3239 
3240   // Handle vector of constants.
3241   if (auto *CV = dyn_cast<Constant>(V)) {
3242     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3243       unsigned NumElts = CVFVTy->getNumElements();
3244       for (unsigned i = 0; i != NumElts; ++i) {
3245         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3246         if (!CFP)
3247           return false;
3248         if (CFP->getValueAPF().isNegative() &&
3249             (SignBitOnly || !CFP->getValueAPF().isZero()))
3250           return false;
3251       }
3252 
3253       // All non-negative ConstantFPs.
3254       return true;
3255     }
3256   }
3257 
3258   if (Depth == MaxAnalysisRecursionDepth)
3259     return false;
3260 
3261   const Operator *I = dyn_cast<Operator>(V);
3262   if (!I)
3263     return false;
3264 
3265   switch (I->getOpcode()) {
3266   default:
3267     break;
3268   // Unsigned integers are always nonnegative.
3269   case Instruction::UIToFP:
3270     return true;
3271   case Instruction::FMul:
3272   case Instruction::FDiv:
3273     // X * X is always non-negative or a NaN.
3274     // X / X is always exactly 1.0 or a NaN.
3275     if (I->getOperand(0) == I->getOperand(1) &&
3276         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3277       return true;
3278 
3279     LLVM_FALLTHROUGH;
3280   case Instruction::FAdd:
3281   case Instruction::FRem:
3282     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3283                                            Depth + 1) &&
3284            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3285                                            Depth + 1);
3286   case Instruction::Select:
3287     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3288                                            Depth + 1) &&
3289            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3290                                            Depth + 1);
3291   case Instruction::FPExt:
3292   case Instruction::FPTrunc:
3293     // Widening/narrowing never change sign.
3294     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3295                                            Depth + 1);
3296   case Instruction::ExtractElement:
3297     // Look through extract element. At the moment we keep this simple and skip
3298     // tracking the specific element. But at least we might find information
3299     // valid for all elements of the vector.
3300     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3301                                            Depth + 1);
3302   case Instruction::Call:
3303     const auto *CI = cast<CallInst>(I);
3304     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3305     switch (IID) {
3306     default:
3307       break;
3308     case Intrinsic::maxnum: {
3309       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3310       auto isPositiveNum = [&](Value *V) {
3311         if (SignBitOnly) {
3312           // With SignBitOnly, this is tricky because the result of
3313           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3314           // a constant strictly greater than 0.0.
3315           const APFloat *C;
3316           return match(V, m_APFloat(C)) &&
3317                  *C > APFloat::getZero(C->getSemantics());
3318         }
3319 
3320         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3321         // maxnum can't be ordered-less-than-zero.
3322         return isKnownNeverNaN(V, TLI) &&
3323                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3324       };
3325 
3326       // TODO: This could be improved. We could also check that neither operand
3327       //       has its sign bit set (and at least 1 is not-NAN?).
3328       return isPositiveNum(V0) || isPositiveNum(V1);
3329     }
3330 
3331     case Intrinsic::maximum:
3332       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3333                                              Depth + 1) ||
3334              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3335                                              Depth + 1);
3336     case Intrinsic::minnum:
3337     case Intrinsic::minimum:
3338       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3339                                              Depth + 1) &&
3340              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3341                                              Depth + 1);
3342     case Intrinsic::exp:
3343     case Intrinsic::exp2:
3344     case Intrinsic::fabs:
3345       return true;
3346 
3347     case Intrinsic::sqrt:
3348       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3349       if (!SignBitOnly)
3350         return true;
3351       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3352                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3353 
3354     case Intrinsic::powi:
3355       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3356         // powi(x,n) is non-negative if n is even.
3357         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3358           return true;
3359       }
3360       // TODO: This is not correct.  Given that exp is an integer, here are the
3361       // ways that pow can return a negative value:
3362       //
3363       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3364       //   pow(-0, exp)   --> -inf if exp is negative odd.
3365       //   pow(-0, exp)   --> -0 if exp is positive odd.
3366       //   pow(-inf, exp) --> -0 if exp is negative odd.
3367       //   pow(-inf, exp) --> -inf if exp is positive odd.
3368       //
3369       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3370       // but we must return false if x == -0.  Unfortunately we do not currently
3371       // have a way of expressing this constraint.  See details in
3372       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3373       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3374                                              Depth + 1);
3375 
3376     case Intrinsic::fma:
3377     case Intrinsic::fmuladd:
3378       // x*x+y is non-negative if y is non-negative.
3379       return I->getOperand(0) == I->getOperand(1) &&
3380              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3381              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3382                                              Depth + 1);
3383     }
3384     break;
3385   }
3386   return false;
3387 }
3388 
3389 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3390                                        const TargetLibraryInfo *TLI) {
3391   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3392 }
3393 
3394 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3395   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3396 }
3397 
3398 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3399                                 unsigned Depth) {
3400   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3401 
3402   // If we're told that infinities won't happen, assume they won't.
3403   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3404     if (FPMathOp->hasNoInfs())
3405       return true;
3406 
3407   // Handle scalar constants.
3408   if (auto *CFP = dyn_cast<ConstantFP>(V))
3409     return !CFP->isInfinity();
3410 
3411   if (Depth == MaxAnalysisRecursionDepth)
3412     return false;
3413 
3414   if (auto *Inst = dyn_cast<Instruction>(V)) {
3415     switch (Inst->getOpcode()) {
3416     case Instruction::Select: {
3417       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3418              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3419     }
3420     case Instruction::SIToFP:
3421     case Instruction::UIToFP: {
3422       // Get width of largest magnitude integer (remove a bit if signed).
3423       // This still works for a signed minimum value because the largest FP
3424       // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3425       int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3426       if (Inst->getOpcode() == Instruction::SIToFP)
3427         --IntSize;
3428 
3429       // If the exponent of the largest finite FP value can hold the largest
3430       // integer, the result of the cast must be finite.
3431       Type *FPTy = Inst->getType()->getScalarType();
3432       return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3433     }
3434     default:
3435       break;
3436     }
3437   }
3438 
3439   // try to handle fixed width vector constants
3440   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3441   if (VFVTy && isa<Constant>(V)) {
3442     // For vectors, verify that each element is not infinity.
3443     unsigned NumElts = VFVTy->getNumElements();
3444     for (unsigned i = 0; i != NumElts; ++i) {
3445       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3446       if (!Elt)
3447         return false;
3448       if (isa<UndefValue>(Elt))
3449         continue;
3450       auto *CElt = dyn_cast<ConstantFP>(Elt);
3451       if (!CElt || CElt->isInfinity())
3452         return false;
3453     }
3454     // All elements were confirmed non-infinity or undefined.
3455     return true;
3456   }
3457 
3458   // was not able to prove that V never contains infinity
3459   return false;
3460 }
3461 
3462 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3463                            unsigned Depth) {
3464   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3465 
3466   // If we're told that NaNs won't happen, assume they won't.
3467   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3468     if (FPMathOp->hasNoNaNs())
3469       return true;
3470 
3471   // Handle scalar constants.
3472   if (auto *CFP = dyn_cast<ConstantFP>(V))
3473     return !CFP->isNaN();
3474 
3475   if (Depth == MaxAnalysisRecursionDepth)
3476     return false;
3477 
3478   if (auto *Inst = dyn_cast<Instruction>(V)) {
3479     switch (Inst->getOpcode()) {
3480     case Instruction::FAdd:
3481     case Instruction::FSub:
3482       // Adding positive and negative infinity produces NaN.
3483       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3484              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3485              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3486               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3487 
3488     case Instruction::FMul:
3489       // Zero multiplied with infinity produces NaN.
3490       // FIXME: If neither side can be zero fmul never produces NaN.
3491       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3492              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3493              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3494              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3495 
3496     case Instruction::FDiv:
3497     case Instruction::FRem:
3498       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3499       return false;
3500 
3501     case Instruction::Select: {
3502       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3503              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3504     }
3505     case Instruction::SIToFP:
3506     case Instruction::UIToFP:
3507       return true;
3508     case Instruction::FPTrunc:
3509     case Instruction::FPExt:
3510       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3511     default:
3512       break;
3513     }
3514   }
3515 
3516   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3517     switch (II->getIntrinsicID()) {
3518     case Intrinsic::canonicalize:
3519     case Intrinsic::fabs:
3520     case Intrinsic::copysign:
3521     case Intrinsic::exp:
3522     case Intrinsic::exp2:
3523     case Intrinsic::floor:
3524     case Intrinsic::ceil:
3525     case Intrinsic::trunc:
3526     case Intrinsic::rint:
3527     case Intrinsic::nearbyint:
3528     case Intrinsic::round:
3529     case Intrinsic::roundeven:
3530       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3531     case Intrinsic::sqrt:
3532       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3533              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3534     case Intrinsic::minnum:
3535     case Intrinsic::maxnum:
3536       // If either operand is not NaN, the result is not NaN.
3537       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3538              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3539     default:
3540       return false;
3541     }
3542   }
3543 
3544   // Try to handle fixed width vector constants
3545   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3546   if (VFVTy && isa<Constant>(V)) {
3547     // For vectors, verify that each element is not NaN.
3548     unsigned NumElts = VFVTy->getNumElements();
3549     for (unsigned i = 0; i != NumElts; ++i) {
3550       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3551       if (!Elt)
3552         return false;
3553       if (isa<UndefValue>(Elt))
3554         continue;
3555       auto *CElt = dyn_cast<ConstantFP>(Elt);
3556       if (!CElt || CElt->isNaN())
3557         return false;
3558     }
3559     // All elements were confirmed not-NaN or undefined.
3560     return true;
3561   }
3562 
3563   // Was not able to prove that V never contains NaN
3564   return false;
3565 }
3566 
3567 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3568 
3569   // All byte-wide stores are splatable, even of arbitrary variables.
3570   if (V->getType()->isIntegerTy(8))
3571     return V;
3572 
3573   LLVMContext &Ctx = V->getContext();
3574 
3575   // Undef don't care.
3576   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3577   if (isa<UndefValue>(V))
3578     return UndefInt8;
3579 
3580   // Return Undef for zero-sized type.
3581   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3582     return UndefInt8;
3583 
3584   Constant *C = dyn_cast<Constant>(V);
3585   if (!C) {
3586     // Conceptually, we could handle things like:
3587     //   %a = zext i8 %X to i16
3588     //   %b = shl i16 %a, 8
3589     //   %c = or i16 %a, %b
3590     // but until there is an example that actually needs this, it doesn't seem
3591     // worth worrying about.
3592     return nullptr;
3593   }
3594 
3595   // Handle 'null' ConstantArrayZero etc.
3596   if (C->isNullValue())
3597     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3598 
3599   // Constant floating-point values can be handled as integer values if the
3600   // corresponding integer value is "byteable".  An important case is 0.0.
3601   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3602     Type *Ty = nullptr;
3603     if (CFP->getType()->isHalfTy())
3604       Ty = Type::getInt16Ty(Ctx);
3605     else if (CFP->getType()->isFloatTy())
3606       Ty = Type::getInt32Ty(Ctx);
3607     else if (CFP->getType()->isDoubleTy())
3608       Ty = Type::getInt64Ty(Ctx);
3609     // Don't handle long double formats, which have strange constraints.
3610     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3611               : nullptr;
3612   }
3613 
3614   // We can handle constant integers that are multiple of 8 bits.
3615   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3616     if (CI->getBitWidth() % 8 == 0) {
3617       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3618       if (!CI->getValue().isSplat(8))
3619         return nullptr;
3620       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3621     }
3622   }
3623 
3624   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3625     if (CE->getOpcode() == Instruction::IntToPtr) {
3626       auto PS = DL.getPointerSizeInBits(
3627           cast<PointerType>(CE->getType())->getAddressSpace());
3628       return isBytewiseValue(
3629           ConstantExpr::getIntegerCast(CE->getOperand(0),
3630                                        Type::getIntNTy(Ctx, PS), false),
3631           DL);
3632     }
3633   }
3634 
3635   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3636     if (LHS == RHS)
3637       return LHS;
3638     if (!LHS || !RHS)
3639       return nullptr;
3640     if (LHS == UndefInt8)
3641       return RHS;
3642     if (RHS == UndefInt8)
3643       return LHS;
3644     return nullptr;
3645   };
3646 
3647   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3648     Value *Val = UndefInt8;
3649     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3650       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3651         return nullptr;
3652     return Val;
3653   }
3654 
3655   if (isa<ConstantAggregate>(C)) {
3656     Value *Val = UndefInt8;
3657     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3658       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3659         return nullptr;
3660     return Val;
3661   }
3662 
3663   // Don't try to handle the handful of other constants.
3664   return nullptr;
3665 }
3666 
3667 // This is the recursive version of BuildSubAggregate. It takes a few different
3668 // arguments. Idxs is the index within the nested struct From that we are
3669 // looking at now (which is of type IndexedType). IdxSkip is the number of
3670 // indices from Idxs that should be left out when inserting into the resulting
3671 // struct. To is the result struct built so far, new insertvalue instructions
3672 // build on that.
3673 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3674                                 SmallVectorImpl<unsigned> &Idxs,
3675                                 unsigned IdxSkip,
3676                                 Instruction *InsertBefore) {
3677   StructType *STy = dyn_cast<StructType>(IndexedType);
3678   if (STy) {
3679     // Save the original To argument so we can modify it
3680     Value *OrigTo = To;
3681     // General case, the type indexed by Idxs is a struct
3682     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3683       // Process each struct element recursively
3684       Idxs.push_back(i);
3685       Value *PrevTo = To;
3686       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3687                              InsertBefore);
3688       Idxs.pop_back();
3689       if (!To) {
3690         // Couldn't find any inserted value for this index? Cleanup
3691         while (PrevTo != OrigTo) {
3692           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3693           PrevTo = Del->getAggregateOperand();
3694           Del->eraseFromParent();
3695         }
3696         // Stop processing elements
3697         break;
3698       }
3699     }
3700     // If we successfully found a value for each of our subaggregates
3701     if (To)
3702       return To;
3703   }
3704   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3705   // the struct's elements had a value that was inserted directly. In the latter
3706   // case, perhaps we can't determine each of the subelements individually, but
3707   // we might be able to find the complete struct somewhere.
3708 
3709   // Find the value that is at that particular spot
3710   Value *V = FindInsertedValue(From, Idxs);
3711 
3712   if (!V)
3713     return nullptr;
3714 
3715   // Insert the value in the new (sub) aggregate
3716   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3717                                  "tmp", InsertBefore);
3718 }
3719 
3720 // This helper takes a nested struct and extracts a part of it (which is again a
3721 // struct) into a new value. For example, given the struct:
3722 // { a, { b, { c, d }, e } }
3723 // and the indices "1, 1" this returns
3724 // { c, d }.
3725 //
3726 // It does this by inserting an insertvalue for each element in the resulting
3727 // struct, as opposed to just inserting a single struct. This will only work if
3728 // each of the elements of the substruct are known (ie, inserted into From by an
3729 // insertvalue instruction somewhere).
3730 //
3731 // All inserted insertvalue instructions are inserted before InsertBefore
3732 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3733                                 Instruction *InsertBefore) {
3734   assert(InsertBefore && "Must have someplace to insert!");
3735   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3736                                                              idx_range);
3737   Value *To = UndefValue::get(IndexedType);
3738   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3739   unsigned IdxSkip = Idxs.size();
3740 
3741   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3742 }
3743 
3744 /// Given an aggregate and a sequence of indices, see if the scalar value
3745 /// indexed is already around as a register, for example if it was inserted
3746 /// directly into the aggregate.
3747 ///
3748 /// If InsertBefore is not null, this function will duplicate (modified)
3749 /// insertvalues when a part of a nested struct is extracted.
3750 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3751                                Instruction *InsertBefore) {
3752   // Nothing to index? Just return V then (this is useful at the end of our
3753   // recursion).
3754   if (idx_range.empty())
3755     return V;
3756   // We have indices, so V should have an indexable type.
3757   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3758          "Not looking at a struct or array?");
3759   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3760          "Invalid indices for type?");
3761 
3762   if (Constant *C = dyn_cast<Constant>(V)) {
3763     C = C->getAggregateElement(idx_range[0]);
3764     if (!C) return nullptr;
3765     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3766   }
3767 
3768   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3769     // Loop the indices for the insertvalue instruction in parallel with the
3770     // requested indices
3771     const unsigned *req_idx = idx_range.begin();
3772     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3773          i != e; ++i, ++req_idx) {
3774       if (req_idx == idx_range.end()) {
3775         // We can't handle this without inserting insertvalues
3776         if (!InsertBefore)
3777           return nullptr;
3778 
3779         // The requested index identifies a part of a nested aggregate. Handle
3780         // this specially. For example,
3781         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3782         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3783         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3784         // This can be changed into
3785         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3786         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3787         // which allows the unused 0,0 element from the nested struct to be
3788         // removed.
3789         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3790                                  InsertBefore);
3791       }
3792 
3793       // This insert value inserts something else than what we are looking for.
3794       // See if the (aggregate) value inserted into has the value we are
3795       // looking for, then.
3796       if (*req_idx != *i)
3797         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3798                                  InsertBefore);
3799     }
3800     // If we end up here, the indices of the insertvalue match with those
3801     // requested (though possibly only partially). Now we recursively look at
3802     // the inserted value, passing any remaining indices.
3803     return FindInsertedValue(I->getInsertedValueOperand(),
3804                              makeArrayRef(req_idx, idx_range.end()),
3805                              InsertBefore);
3806   }
3807 
3808   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3809     // If we're extracting a value from an aggregate that was extracted from
3810     // something else, we can extract from that something else directly instead.
3811     // However, we will need to chain I's indices with the requested indices.
3812 
3813     // Calculate the number of indices required
3814     unsigned size = I->getNumIndices() + idx_range.size();
3815     // Allocate some space to put the new indices in
3816     SmallVector<unsigned, 5> Idxs;
3817     Idxs.reserve(size);
3818     // Add indices from the extract value instruction
3819     Idxs.append(I->idx_begin(), I->idx_end());
3820 
3821     // Add requested indices
3822     Idxs.append(idx_range.begin(), idx_range.end());
3823 
3824     assert(Idxs.size() == size
3825            && "Number of indices added not correct?");
3826 
3827     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3828   }
3829   // Otherwise, we don't know (such as, extracting from a function return value
3830   // or load instruction)
3831   return nullptr;
3832 }
3833 
3834 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3835                                        unsigned CharSize) {
3836   // Make sure the GEP has exactly three arguments.
3837   if (GEP->getNumOperands() != 3)
3838     return false;
3839 
3840   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3841   // CharSize.
3842   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3843   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3844     return false;
3845 
3846   // Check to make sure that the first operand of the GEP is an integer and
3847   // has value 0 so that we are sure we're indexing into the initializer.
3848   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3849   if (!FirstIdx || !FirstIdx->isZero())
3850     return false;
3851 
3852   return true;
3853 }
3854 
3855 bool llvm::getConstantDataArrayInfo(const Value *V,
3856                                     ConstantDataArraySlice &Slice,
3857                                     unsigned ElementSize, uint64_t Offset) {
3858   assert(V);
3859 
3860   // Look through bitcast instructions and geps.
3861   V = V->stripPointerCasts();
3862 
3863   // If the value is a GEP instruction or constant expression, treat it as an
3864   // offset.
3865   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3866     // The GEP operator should be based on a pointer to string constant, and is
3867     // indexing into the string constant.
3868     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3869       return false;
3870 
3871     // If the second index isn't a ConstantInt, then this is a variable index
3872     // into the array.  If this occurs, we can't say anything meaningful about
3873     // the string.
3874     uint64_t StartIdx = 0;
3875     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3876       StartIdx = CI->getZExtValue();
3877     else
3878       return false;
3879     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3880                                     StartIdx + Offset);
3881   }
3882 
3883   // The GEP instruction, constant or instruction, must reference a global
3884   // variable that is a constant and is initialized. The referenced constant
3885   // initializer is the array that we'll use for optimization.
3886   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3887   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3888     return false;
3889 
3890   const ConstantDataArray *Array;
3891   ArrayType *ArrayTy;
3892   if (GV->getInitializer()->isNullValue()) {
3893     Type *GVTy = GV->getValueType();
3894     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3895       // A zeroinitializer for the array; there is no ConstantDataArray.
3896       Array = nullptr;
3897     } else {
3898       const DataLayout &DL = GV->getParent()->getDataLayout();
3899       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
3900       uint64_t Length = SizeInBytes / (ElementSize / 8);
3901       if (Length <= Offset)
3902         return false;
3903 
3904       Slice.Array = nullptr;
3905       Slice.Offset = 0;
3906       Slice.Length = Length - Offset;
3907       return true;
3908     }
3909   } else {
3910     // This must be a ConstantDataArray.
3911     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3912     if (!Array)
3913       return false;
3914     ArrayTy = Array->getType();
3915   }
3916   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3917     return false;
3918 
3919   uint64_t NumElts = ArrayTy->getArrayNumElements();
3920   if (Offset > NumElts)
3921     return false;
3922 
3923   Slice.Array = Array;
3924   Slice.Offset = Offset;
3925   Slice.Length = NumElts - Offset;
3926   return true;
3927 }
3928 
3929 /// This function computes the length of a null-terminated C string pointed to
3930 /// by V. If successful, it returns true and returns the string in Str.
3931 /// If unsuccessful, it returns false.
3932 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3933                                  uint64_t Offset, bool TrimAtNul) {
3934   ConstantDataArraySlice Slice;
3935   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3936     return false;
3937 
3938   if (Slice.Array == nullptr) {
3939     if (TrimAtNul) {
3940       Str = StringRef();
3941       return true;
3942     }
3943     if (Slice.Length == 1) {
3944       Str = StringRef("", 1);
3945       return true;
3946     }
3947     // We cannot instantiate a StringRef as we do not have an appropriate string
3948     // of 0s at hand.
3949     return false;
3950   }
3951 
3952   // Start out with the entire array in the StringRef.
3953   Str = Slice.Array->getAsString();
3954   // Skip over 'offset' bytes.
3955   Str = Str.substr(Slice.Offset);
3956 
3957   if (TrimAtNul) {
3958     // Trim off the \0 and anything after it.  If the array is not nul
3959     // terminated, we just return the whole end of string.  The client may know
3960     // some other way that the string is length-bound.
3961     Str = Str.substr(0, Str.find('\0'));
3962   }
3963   return true;
3964 }
3965 
3966 // These next two are very similar to the above, but also look through PHI
3967 // nodes.
3968 // TODO: See if we can integrate these two together.
3969 
3970 /// If we can compute the length of the string pointed to by
3971 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3972 static uint64_t GetStringLengthH(const Value *V,
3973                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3974                                  unsigned CharSize) {
3975   // Look through noop bitcast instructions.
3976   V = V->stripPointerCasts();
3977 
3978   // If this is a PHI node, there are two cases: either we have already seen it
3979   // or we haven't.
3980   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3981     if (!PHIs.insert(PN).second)
3982       return ~0ULL;  // already in the set.
3983 
3984     // If it was new, see if all the input strings are the same length.
3985     uint64_t LenSoFar = ~0ULL;
3986     for (Value *IncValue : PN->incoming_values()) {
3987       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3988       if (Len == 0) return 0; // Unknown length -> unknown.
3989 
3990       if (Len == ~0ULL) continue;
3991 
3992       if (Len != LenSoFar && LenSoFar != ~0ULL)
3993         return 0;    // Disagree -> unknown.
3994       LenSoFar = Len;
3995     }
3996 
3997     // Success, all agree.
3998     return LenSoFar;
3999   }
4000 
4001   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4002   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4003     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4004     if (Len1 == 0) return 0;
4005     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4006     if (Len2 == 0) return 0;
4007     if (Len1 == ~0ULL) return Len2;
4008     if (Len2 == ~0ULL) return Len1;
4009     if (Len1 != Len2) return 0;
4010     return Len1;
4011   }
4012 
4013   // Otherwise, see if we can read the string.
4014   ConstantDataArraySlice Slice;
4015   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4016     return 0;
4017 
4018   if (Slice.Array == nullptr)
4019     return 1;
4020 
4021   // Search for nul characters
4022   unsigned NullIndex = 0;
4023   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4024     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4025       break;
4026   }
4027 
4028   return NullIndex + 1;
4029 }
4030 
4031 /// If we can compute the length of the string pointed to by
4032 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4033 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4034   if (!V->getType()->isPointerTy())
4035     return 0;
4036 
4037   SmallPtrSet<const PHINode*, 32> PHIs;
4038   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4039   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4040   // an empty string as a length.
4041   return Len == ~0ULL ? 1 : Len;
4042 }
4043 
4044 const Value *
4045 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4046                                            bool MustPreserveNullness) {
4047   assert(Call &&
4048          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4049   if (const Value *RV = Call->getReturnedArgOperand())
4050     return RV;
4051   // This can be used only as a aliasing property.
4052   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4053           Call, MustPreserveNullness))
4054     return Call->getArgOperand(0);
4055   return nullptr;
4056 }
4057 
4058 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4059     const CallBase *Call, bool MustPreserveNullness) {
4060   switch (Call->getIntrinsicID()) {
4061   case Intrinsic::launder_invariant_group:
4062   case Intrinsic::strip_invariant_group:
4063   case Intrinsic::aarch64_irg:
4064   case Intrinsic::aarch64_tagp:
4065     return true;
4066   case Intrinsic::ptrmask:
4067     return !MustPreserveNullness;
4068   default:
4069     return false;
4070   }
4071 }
4072 
4073 /// \p PN defines a loop-variant pointer to an object.  Check if the
4074 /// previous iteration of the loop was referring to the same object as \p PN.
4075 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4076                                          const LoopInfo *LI) {
4077   // Find the loop-defined value.
4078   Loop *L = LI->getLoopFor(PN->getParent());
4079   if (PN->getNumIncomingValues() != 2)
4080     return true;
4081 
4082   // Find the value from previous iteration.
4083   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4084   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4085     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4086   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4087     return true;
4088 
4089   // If a new pointer is loaded in the loop, the pointer references a different
4090   // object in every iteration.  E.g.:
4091   //    for (i)
4092   //       int *p = a[i];
4093   //       ...
4094   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4095     if (!L->isLoopInvariant(Load->getPointerOperand()))
4096       return false;
4097   return true;
4098 }
4099 
4100 Value *llvm::getUnderlyingObject(Value *V, unsigned MaxLookup) {
4101   if (!V->getType()->isPointerTy())
4102     return V;
4103   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4104     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4105       V = GEP->getPointerOperand();
4106     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4107                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4108       V = cast<Operator>(V)->getOperand(0);
4109       if (!V->getType()->isPointerTy())
4110         return V;
4111     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
4112       if (GA->isInterposable())
4113         return V;
4114       V = GA->getAliasee();
4115     } else {
4116       if (auto *PHI = dyn_cast<PHINode>(V)) {
4117         // Look through single-arg phi nodes created by LCSSA.
4118         if (PHI->getNumIncomingValues() == 1) {
4119           V = PHI->getIncomingValue(0);
4120           continue;
4121         }
4122       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4123         // CaptureTracking can know about special capturing properties of some
4124         // intrinsics like launder.invariant.group, that can't be expressed with
4125         // the attributes, but have properties like returning aliasing pointer.
4126         // Because some analysis may assume that nocaptured pointer is not
4127         // returned from some special intrinsic (because function would have to
4128         // be marked with returns attribute), it is crucial to use this function
4129         // because it should be in sync with CaptureTracking. Not using it may
4130         // cause weird miscompilations where 2 aliasing pointers are assumed to
4131         // noalias.
4132         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4133           V = RP;
4134           continue;
4135         }
4136       }
4137 
4138       return V;
4139     }
4140     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4141   }
4142   return V;
4143 }
4144 
4145 void llvm::getUnderlyingObjects(const Value *V,
4146                                 SmallVectorImpl<const Value *> &Objects,
4147                                 LoopInfo *LI, unsigned MaxLookup) {
4148   SmallPtrSet<const Value *, 4> Visited;
4149   SmallVector<const Value *, 4> Worklist;
4150   Worklist.push_back(V);
4151   do {
4152     const Value *P = Worklist.pop_back_val();
4153     P = getUnderlyingObject(P, MaxLookup);
4154 
4155     if (!Visited.insert(P).second)
4156       continue;
4157 
4158     if (auto *SI = dyn_cast<SelectInst>(P)) {
4159       Worklist.push_back(SI->getTrueValue());
4160       Worklist.push_back(SI->getFalseValue());
4161       continue;
4162     }
4163 
4164     if (auto *PN = dyn_cast<PHINode>(P)) {
4165       // If this PHI changes the underlying object in every iteration of the
4166       // loop, don't look through it.  Consider:
4167       //   int **A;
4168       //   for (i) {
4169       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4170       //     Curr = A[i];
4171       //     *Prev, *Curr;
4172       //
4173       // Prev is tracking Curr one iteration behind so they refer to different
4174       // underlying objects.
4175       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4176           isSameUnderlyingObjectInLoop(PN, LI))
4177         for (Value *IncValue : PN->incoming_values())
4178           Worklist.push_back(IncValue);
4179       continue;
4180     }
4181 
4182     Objects.push_back(P);
4183   } while (!Worklist.empty());
4184 }
4185 
4186 /// This is the function that does the work of looking through basic
4187 /// ptrtoint+arithmetic+inttoptr sequences.
4188 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4189   do {
4190     if (const Operator *U = dyn_cast<Operator>(V)) {
4191       // If we find a ptrtoint, we can transfer control back to the
4192       // regular getUnderlyingObjectFromInt.
4193       if (U->getOpcode() == Instruction::PtrToInt)
4194         return U->getOperand(0);
4195       // If we find an add of a constant, a multiplied value, or a phi, it's
4196       // likely that the other operand will lead us to the base
4197       // object. We don't have to worry about the case where the
4198       // object address is somehow being computed by the multiply,
4199       // because our callers only care when the result is an
4200       // identifiable object.
4201       if (U->getOpcode() != Instruction::Add ||
4202           (!isa<ConstantInt>(U->getOperand(1)) &&
4203            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4204            !isa<PHINode>(U->getOperand(1))))
4205         return V;
4206       V = U->getOperand(0);
4207     } else {
4208       return V;
4209     }
4210     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4211   } while (true);
4212 }
4213 
4214 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4215 /// ptrtoint+arithmetic+inttoptr sequences.
4216 /// It returns false if unidentified object is found in getUnderlyingObjects.
4217 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4218                                           SmallVectorImpl<Value *> &Objects) {
4219   SmallPtrSet<const Value *, 16> Visited;
4220   SmallVector<const Value *, 4> Working(1, V);
4221   do {
4222     V = Working.pop_back_val();
4223 
4224     SmallVector<const Value *, 4> Objs;
4225     getUnderlyingObjects(V, Objs);
4226 
4227     for (const Value *V : Objs) {
4228       if (!Visited.insert(V).second)
4229         continue;
4230       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4231         const Value *O =
4232           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4233         if (O->getType()->isPointerTy()) {
4234           Working.push_back(O);
4235           continue;
4236         }
4237       }
4238       // If getUnderlyingObjects fails to find an identifiable object,
4239       // getUnderlyingObjectsForCodeGen also fails for safety.
4240       if (!isIdentifiedObject(V)) {
4241         Objects.clear();
4242         return false;
4243       }
4244       Objects.push_back(const_cast<Value *>(V));
4245     }
4246   } while (!Working.empty());
4247   return true;
4248 }
4249 
4250 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4251   AllocaInst *Result = nullptr;
4252   SmallPtrSet<Value *, 4> Visited;
4253   SmallVector<Value *, 4> Worklist;
4254 
4255   auto AddWork = [&](Value *V) {
4256     if (Visited.insert(V).second)
4257       Worklist.push_back(V);
4258   };
4259 
4260   AddWork(V);
4261   do {
4262     V = Worklist.pop_back_val();
4263     assert(Visited.count(V));
4264 
4265     if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4266       if (Result && Result != AI)
4267         return nullptr;
4268       Result = AI;
4269     } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4270       AddWork(CI->getOperand(0));
4271     } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4272       for (Value *IncValue : PN->incoming_values())
4273         AddWork(IncValue);
4274     } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4275       AddWork(SI->getTrueValue());
4276       AddWork(SI->getFalseValue());
4277     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4278       if (OffsetZero && !GEP->hasAllZeroIndices())
4279         return nullptr;
4280       AddWork(GEP->getPointerOperand());
4281     } else {
4282       return nullptr;
4283     }
4284   } while (!Worklist.empty());
4285 
4286   return Result;
4287 }
4288 
4289 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4290     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4291   for (const User *U : V->users()) {
4292     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4293     if (!II)
4294       return false;
4295 
4296     if (AllowLifetime && II->isLifetimeStartOrEnd())
4297       continue;
4298 
4299     if (AllowDroppable && II->isDroppable())
4300       continue;
4301 
4302     return false;
4303   }
4304   return true;
4305 }
4306 
4307 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4308   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4309       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4310 }
4311 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4312   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4313       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4314 }
4315 
4316 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4317   if (!LI.isUnordered())
4318     return true;
4319   const Function &F = *LI.getFunction();
4320   // Speculative load may create a race that did not exist in the source.
4321   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4322     // Speculative load may load data from dirty regions.
4323     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4324     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4325 }
4326 
4327 
4328 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4329                                         const Instruction *CtxI,
4330                                         const DominatorTree *DT) {
4331   const Operator *Inst = dyn_cast<Operator>(V);
4332   if (!Inst)
4333     return false;
4334 
4335   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4336     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4337       if (C->canTrap())
4338         return false;
4339 
4340   switch (Inst->getOpcode()) {
4341   default:
4342     return true;
4343   case Instruction::UDiv:
4344   case Instruction::URem: {
4345     // x / y is undefined if y == 0.
4346     const APInt *V;
4347     if (match(Inst->getOperand(1), m_APInt(V)))
4348       return *V != 0;
4349     return false;
4350   }
4351   case Instruction::SDiv:
4352   case Instruction::SRem: {
4353     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4354     const APInt *Numerator, *Denominator;
4355     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4356       return false;
4357     // We cannot hoist this division if the denominator is 0.
4358     if (*Denominator == 0)
4359       return false;
4360     // It's safe to hoist if the denominator is not 0 or -1.
4361     if (*Denominator != -1)
4362       return true;
4363     // At this point we know that the denominator is -1.  It is safe to hoist as
4364     // long we know that the numerator is not INT_MIN.
4365     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4366       return !Numerator->isMinSignedValue();
4367     // The numerator *might* be MinSignedValue.
4368     return false;
4369   }
4370   case Instruction::Load: {
4371     const LoadInst *LI = cast<LoadInst>(Inst);
4372     if (mustSuppressSpeculation(*LI))
4373       return false;
4374     const DataLayout &DL = LI->getModule()->getDataLayout();
4375     return isDereferenceableAndAlignedPointer(
4376         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4377         DL, CtxI, DT);
4378   }
4379   case Instruction::Call: {
4380     auto *CI = cast<const CallInst>(Inst);
4381     const Function *Callee = CI->getCalledFunction();
4382 
4383     // The called function could have undefined behavior or side-effects, even
4384     // if marked readnone nounwind.
4385     return Callee && Callee->isSpeculatable();
4386   }
4387   case Instruction::VAArg:
4388   case Instruction::Alloca:
4389   case Instruction::Invoke:
4390   case Instruction::CallBr:
4391   case Instruction::PHI:
4392   case Instruction::Store:
4393   case Instruction::Ret:
4394   case Instruction::Br:
4395   case Instruction::IndirectBr:
4396   case Instruction::Switch:
4397   case Instruction::Unreachable:
4398   case Instruction::Fence:
4399   case Instruction::AtomicRMW:
4400   case Instruction::AtomicCmpXchg:
4401   case Instruction::LandingPad:
4402   case Instruction::Resume:
4403   case Instruction::CatchSwitch:
4404   case Instruction::CatchPad:
4405   case Instruction::CatchRet:
4406   case Instruction::CleanupPad:
4407   case Instruction::CleanupRet:
4408     return false; // Misc instructions which have effects
4409   }
4410 }
4411 
4412 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4413   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4414 }
4415 
4416 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4417 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4418   switch (OR) {
4419     case ConstantRange::OverflowResult::MayOverflow:
4420       return OverflowResult::MayOverflow;
4421     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4422       return OverflowResult::AlwaysOverflowsLow;
4423     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4424       return OverflowResult::AlwaysOverflowsHigh;
4425     case ConstantRange::OverflowResult::NeverOverflows:
4426       return OverflowResult::NeverOverflows;
4427   }
4428   llvm_unreachable("Unknown OverflowResult");
4429 }
4430 
4431 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4432 static ConstantRange computeConstantRangeIncludingKnownBits(
4433     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4434     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4435     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4436   KnownBits Known = computeKnownBits(
4437       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4438   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4439   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4440   ConstantRange::PreferredRangeType RangeType =
4441       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4442   return CR1.intersectWith(CR2, RangeType);
4443 }
4444 
4445 OverflowResult llvm::computeOverflowForUnsignedMul(
4446     const Value *LHS, const Value *RHS, const DataLayout &DL,
4447     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4448     bool UseInstrInfo) {
4449   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4450                                         nullptr, UseInstrInfo);
4451   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4452                                         nullptr, UseInstrInfo);
4453   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4454   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4455   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4456 }
4457 
4458 OverflowResult
4459 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4460                                   const DataLayout &DL, AssumptionCache *AC,
4461                                   const Instruction *CxtI,
4462                                   const DominatorTree *DT, bool UseInstrInfo) {
4463   // Multiplying n * m significant bits yields a result of n + m significant
4464   // bits. If the total number of significant bits does not exceed the
4465   // result bit width (minus 1), there is no overflow.
4466   // This means if we have enough leading sign bits in the operands
4467   // we can guarantee that the result does not overflow.
4468   // Ref: "Hacker's Delight" by Henry Warren
4469   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4470 
4471   // Note that underestimating the number of sign bits gives a more
4472   // conservative answer.
4473   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4474                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4475 
4476   // First handle the easy case: if we have enough sign bits there's
4477   // definitely no overflow.
4478   if (SignBits > BitWidth + 1)
4479     return OverflowResult::NeverOverflows;
4480 
4481   // There are two ambiguous cases where there can be no overflow:
4482   //   SignBits == BitWidth + 1    and
4483   //   SignBits == BitWidth
4484   // The second case is difficult to check, therefore we only handle the
4485   // first case.
4486   if (SignBits == BitWidth + 1) {
4487     // It overflows only when both arguments are negative and the true
4488     // product is exactly the minimum negative number.
4489     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4490     // For simplicity we just check if at least one side is not negative.
4491     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4492                                           nullptr, UseInstrInfo);
4493     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4494                                           nullptr, UseInstrInfo);
4495     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4496       return OverflowResult::NeverOverflows;
4497   }
4498   return OverflowResult::MayOverflow;
4499 }
4500 
4501 OverflowResult llvm::computeOverflowForUnsignedAdd(
4502     const Value *LHS, const Value *RHS, const DataLayout &DL,
4503     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4504     bool UseInstrInfo) {
4505   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4506       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4507       nullptr, UseInstrInfo);
4508   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4509       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4510       nullptr, UseInstrInfo);
4511   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4512 }
4513 
4514 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4515                                                   const Value *RHS,
4516                                                   const AddOperator *Add,
4517                                                   const DataLayout &DL,
4518                                                   AssumptionCache *AC,
4519                                                   const Instruction *CxtI,
4520                                                   const DominatorTree *DT) {
4521   if (Add && Add->hasNoSignedWrap()) {
4522     return OverflowResult::NeverOverflows;
4523   }
4524 
4525   // If LHS and RHS each have at least two sign bits, the addition will look
4526   // like
4527   //
4528   // XX..... +
4529   // YY.....
4530   //
4531   // If the carry into the most significant position is 0, X and Y can't both
4532   // be 1 and therefore the carry out of the addition is also 0.
4533   //
4534   // If the carry into the most significant position is 1, X and Y can't both
4535   // be 0 and therefore the carry out of the addition is also 1.
4536   //
4537   // Since the carry into the most significant position is always equal to
4538   // the carry out of the addition, there is no signed overflow.
4539   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4540       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4541     return OverflowResult::NeverOverflows;
4542 
4543   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4544       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4545   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4546       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4547   OverflowResult OR =
4548       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4549   if (OR != OverflowResult::MayOverflow)
4550     return OR;
4551 
4552   // The remaining code needs Add to be available. Early returns if not so.
4553   if (!Add)
4554     return OverflowResult::MayOverflow;
4555 
4556   // If the sign of Add is the same as at least one of the operands, this add
4557   // CANNOT overflow. If this can be determined from the known bits of the
4558   // operands the above signedAddMayOverflow() check will have already done so.
4559   // The only other way to improve on the known bits is from an assumption, so
4560   // call computeKnownBitsFromAssume() directly.
4561   bool LHSOrRHSKnownNonNegative =
4562       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4563   bool LHSOrRHSKnownNegative =
4564       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4565   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4566     KnownBits AddKnown(LHSRange.getBitWidth());
4567     computeKnownBitsFromAssume(
4568         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4569     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4570         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4571       return OverflowResult::NeverOverflows;
4572   }
4573 
4574   return OverflowResult::MayOverflow;
4575 }
4576 
4577 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4578                                                    const Value *RHS,
4579                                                    const DataLayout &DL,
4580                                                    AssumptionCache *AC,
4581                                                    const Instruction *CxtI,
4582                                                    const DominatorTree *DT) {
4583   // Checking for conditions implied by dominating conditions may be expensive.
4584   // Limit it to usub_with_overflow calls for now.
4585   if (match(CxtI,
4586             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4587     if (auto C =
4588             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4589       if (*C)
4590         return OverflowResult::NeverOverflows;
4591       return OverflowResult::AlwaysOverflowsLow;
4592     }
4593   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4594       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4595   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4596       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4597   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4598 }
4599 
4600 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4601                                                  const Value *RHS,
4602                                                  const DataLayout &DL,
4603                                                  AssumptionCache *AC,
4604                                                  const Instruction *CxtI,
4605                                                  const DominatorTree *DT) {
4606   // If LHS and RHS each have at least two sign bits, the subtraction
4607   // cannot overflow.
4608   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4609       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4610     return OverflowResult::NeverOverflows;
4611 
4612   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4613       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4614   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4615       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4616   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4617 }
4618 
4619 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4620                                      const DominatorTree &DT) {
4621   SmallVector<const BranchInst *, 2> GuardingBranches;
4622   SmallVector<const ExtractValueInst *, 2> Results;
4623 
4624   for (const User *U : WO->users()) {
4625     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4626       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4627 
4628       if (EVI->getIndices()[0] == 0)
4629         Results.push_back(EVI);
4630       else {
4631         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4632 
4633         for (const auto *U : EVI->users())
4634           if (const auto *B = dyn_cast<BranchInst>(U)) {
4635             assert(B->isConditional() && "How else is it using an i1?");
4636             GuardingBranches.push_back(B);
4637           }
4638       }
4639     } else {
4640       // We are using the aggregate directly in a way we don't want to analyze
4641       // here (storing it to a global, say).
4642       return false;
4643     }
4644   }
4645 
4646   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4647     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4648     if (!NoWrapEdge.isSingleEdge())
4649       return false;
4650 
4651     // Check if all users of the add are provably no-wrap.
4652     for (const auto *Result : Results) {
4653       // If the extractvalue itself is not executed on overflow, the we don't
4654       // need to check each use separately, since domination is transitive.
4655       if (DT.dominates(NoWrapEdge, Result->getParent()))
4656         continue;
4657 
4658       for (auto &RU : Result->uses())
4659         if (!DT.dominates(NoWrapEdge, RU))
4660           return false;
4661     }
4662 
4663     return true;
4664   };
4665 
4666   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4667 }
4668 
4669 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4670   // See whether I has flags that may create poison
4671   if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4672     if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4673       return true;
4674   }
4675   if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4676     if (ExactOp->isExact())
4677       return true;
4678   if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4679     auto FMF = FP->getFastMathFlags();
4680     if (FMF.noNaNs() || FMF.noInfs())
4681       return true;
4682   }
4683 
4684   unsigned Opcode = Op->getOpcode();
4685 
4686   // Check whether opcode is a poison/undef-generating operation
4687   switch (Opcode) {
4688   case Instruction::Shl:
4689   case Instruction::AShr:
4690   case Instruction::LShr: {
4691     // Shifts return poison if shiftwidth is larger than the bitwidth.
4692     if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4693       SmallVector<Constant *, 4> ShiftAmounts;
4694       if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4695         unsigned NumElts = FVTy->getNumElements();
4696         for (unsigned i = 0; i < NumElts; ++i)
4697           ShiftAmounts.push_back(C->getAggregateElement(i));
4698       } else if (isa<ScalableVectorType>(C->getType()))
4699         return true; // Can't tell, just return true to be safe
4700       else
4701         ShiftAmounts.push_back(C);
4702 
4703       bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4704         auto *CI = dyn_cast<ConstantInt>(C);
4705         return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4706       });
4707       return !Safe;
4708     }
4709     return true;
4710   }
4711   case Instruction::FPToSI:
4712   case Instruction::FPToUI:
4713     // fptosi/ui yields poison if the resulting value does not fit in the
4714     // destination type.
4715     return true;
4716   case Instruction::Call:
4717   case Instruction::CallBr:
4718   case Instruction::Invoke: {
4719     const auto *CB = cast<CallBase>(Op);
4720     return !CB->hasRetAttr(Attribute::NoUndef);
4721   }
4722   case Instruction::InsertElement:
4723   case Instruction::ExtractElement: {
4724     // If index exceeds the length of the vector, it returns poison
4725     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4726     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4727     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4728     if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
4729       return true;
4730     return false;
4731   }
4732   case Instruction::ShuffleVector: {
4733     // shufflevector may return undef.
4734     if (PoisonOnly)
4735       return false;
4736     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4737                              ? cast<ConstantExpr>(Op)->getShuffleMask()
4738                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4739     return any_of(Mask, [](int Elt) { return Elt == UndefMaskElem; });
4740   }
4741   case Instruction::FNeg:
4742   case Instruction::PHI:
4743   case Instruction::Select:
4744   case Instruction::URem:
4745   case Instruction::SRem:
4746   case Instruction::ExtractValue:
4747   case Instruction::InsertValue:
4748   case Instruction::Freeze:
4749   case Instruction::ICmp:
4750   case Instruction::FCmp:
4751     return false;
4752   case Instruction::GetElementPtr: {
4753     const auto *GEP = cast<GEPOperator>(Op);
4754     return GEP->isInBounds();
4755   }
4756   default: {
4757     const auto *CE = dyn_cast<ConstantExpr>(Op);
4758     if (isa<CastInst>(Op) || (CE && CE->isCast()))
4759       return false;
4760     else if (Instruction::isBinaryOp(Opcode))
4761       return false;
4762     // Be conservative and return true.
4763     return true;
4764   }
4765   }
4766 }
4767 
4768 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
4769   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
4770 }
4771 
4772 bool llvm::canCreatePoison(const Operator *Op) {
4773   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
4774 }
4775 
4776 static bool programUndefinedIfUndefOrPoison(const Value *V,
4777                                             bool PoisonOnly);
4778 
4779 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
4780                                              AssumptionCache *AC,
4781                                              const Instruction *CtxI,
4782                                              const DominatorTree *DT,
4783                                              unsigned Depth, bool PoisonOnly) {
4784   if (Depth >= MaxAnalysisRecursionDepth)
4785     return false;
4786 
4787   if (isa<MetadataAsValue>(V))
4788     return false;
4789 
4790   if (const auto *A = dyn_cast<Argument>(V)) {
4791     if (A->hasAttribute(Attribute::NoUndef))
4792       return true;
4793   }
4794 
4795   if (auto *C = dyn_cast<Constant>(V)) {
4796     if (isa<UndefValue>(C))
4797       return PoisonOnly;
4798 
4799     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
4800         isa<ConstantPointerNull>(C) || isa<Function>(C))
4801       return true;
4802 
4803     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
4804       return (PoisonOnly || !C->containsUndefElement()) &&
4805              !C->containsConstantExpression();
4806   }
4807 
4808   // Strip cast operations from a pointer value.
4809   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
4810   // inbounds with zero offset. To guarantee that the result isn't poison, the
4811   // stripped pointer is checked as it has to be pointing into an allocated
4812   // object or be null `null` to ensure `inbounds` getelement pointers with a
4813   // zero offset could not produce poison.
4814   // It can strip off addrspacecast that do not change bit representation as
4815   // well. We believe that such addrspacecast is equivalent to no-op.
4816   auto *StrippedV = V->stripPointerCastsSameRepresentation();
4817   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
4818       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
4819     return true;
4820 
4821   auto OpCheck = [&](const Value *V) {
4822     return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
4823                                             PoisonOnly);
4824   };
4825 
4826   if (auto *Opr = dyn_cast<Operator>(V)) {
4827     // If the value is a freeze instruction, then it can never
4828     // be undef or poison.
4829     if (isa<FreezeInst>(V))
4830       return true;
4831 
4832     if (const auto *CB = dyn_cast<CallBase>(V)) {
4833       if (CB->hasRetAttr(Attribute::NoUndef))
4834         return true;
4835     }
4836 
4837     if (const auto *PN = dyn_cast<PHINode>(V)) {
4838       unsigned Num = PN->getNumIncomingValues();
4839       bool IsWellDefined = true;
4840       for (unsigned i = 0; i < Num; ++i) {
4841         auto *TI = PN->getIncomingBlock(i)->getTerminator();
4842         if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
4843                                               DT, Depth + 1, PoisonOnly)) {
4844           IsWellDefined = false;
4845           break;
4846         }
4847       }
4848       if (IsWellDefined)
4849         return true;
4850     } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
4851       return true;
4852   }
4853 
4854   if (auto *I = dyn_cast<LoadInst>(V))
4855     if (I->getMetadata(LLVMContext::MD_noundef))
4856       return true;
4857 
4858   if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
4859     return true;
4860 
4861   // CxtI may be null or a cloned instruction.
4862   if (!CtxI || !CtxI->getParent() || !DT)
4863     return false;
4864 
4865   auto *DNode = DT->getNode(CtxI->getParent());
4866   if (!DNode)
4867     // Unreachable block
4868     return false;
4869 
4870   // If V is used as a branch condition before reaching CtxI, V cannot be
4871   // undef or poison.
4872   //   br V, BB1, BB2
4873   // BB1:
4874   //   CtxI ; V cannot be undef or poison here
4875   auto *Dominator = DNode->getIDom();
4876   while (Dominator) {
4877     auto *TI = Dominator->getBlock()->getTerminator();
4878 
4879     Value *Cond = nullptr;
4880     if (auto BI = dyn_cast<BranchInst>(TI)) {
4881       if (BI->isConditional())
4882         Cond = BI->getCondition();
4883     } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
4884       Cond = SI->getCondition();
4885     }
4886 
4887     if (Cond) {
4888       if (Cond == V)
4889         return true;
4890       else if (PoisonOnly && isa<Operator>(Cond)) {
4891         // For poison, we can analyze further
4892         auto *Opr = cast<Operator>(Cond);
4893         if (propagatesPoison(Opr) &&
4894             any_of(Opr->operand_values(), [&](Value *Op) { return Op == V; }))
4895           return true;
4896       }
4897     }
4898 
4899     Dominator = Dominator->getIDom();
4900   }
4901 
4902   SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
4903   if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
4904     return true;
4905 
4906   return false;
4907 }
4908 
4909 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
4910                                             const Instruction *CtxI,
4911                                             const DominatorTree *DT,
4912                                             unsigned Depth) {
4913   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
4914 }
4915 
4916 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
4917                                      const Instruction *CtxI,
4918                                      const DominatorTree *DT, unsigned Depth) {
4919   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
4920 }
4921 
4922 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4923                                                  const DataLayout &DL,
4924                                                  AssumptionCache *AC,
4925                                                  const Instruction *CxtI,
4926                                                  const DominatorTree *DT) {
4927   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4928                                        Add, DL, AC, CxtI, DT);
4929 }
4930 
4931 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4932                                                  const Value *RHS,
4933                                                  const DataLayout &DL,
4934                                                  AssumptionCache *AC,
4935                                                  const Instruction *CxtI,
4936                                                  const DominatorTree *DT) {
4937   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4938 }
4939 
4940 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4941   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
4942   // of time because it's possible for another thread to interfere with it for an
4943   // arbitrary length of time, but programs aren't allowed to rely on that.
4944 
4945   // If there is no successor, then execution can't transfer to it.
4946   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4947     return !CRI->unwindsToCaller();
4948   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4949     return !CatchSwitch->unwindsToCaller();
4950   if (isa<ResumeInst>(I))
4951     return false;
4952   if (isa<ReturnInst>(I))
4953     return false;
4954   if (isa<UnreachableInst>(I))
4955     return false;
4956 
4957   // Calls can throw, or contain an infinite loop, or kill the process.
4958   if (const auto *CB = dyn_cast<CallBase>(I)) {
4959     // Call sites that throw have implicit non-local control flow.
4960     if (!CB->doesNotThrow())
4961       return false;
4962 
4963     // A function which doens't throw and has "willreturn" attribute will
4964     // always return.
4965     if (CB->hasFnAttr(Attribute::WillReturn))
4966       return true;
4967 
4968     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4969     // etc. and thus not return.  However, LLVM already assumes that
4970     //
4971     //  - Thread exiting actions are modeled as writes to memory invisible to
4972     //    the program.
4973     //
4974     //  - Loops that don't have side effects (side effects are volatile/atomic
4975     //    stores and IO) always terminate (see http://llvm.org/PR965).
4976     //    Furthermore IO itself is also modeled as writes to memory invisible to
4977     //    the program.
4978     //
4979     // We rely on those assumptions here, and use the memory effects of the call
4980     // target as a proxy for checking that it always returns.
4981 
4982     // FIXME: This isn't aggressive enough; a call which only writes to a global
4983     // is guaranteed to return.
4984     return CB->onlyReadsMemory() || CB->onlyAccessesArgMemory();
4985   }
4986 
4987   // Other instructions return normally.
4988   return true;
4989 }
4990 
4991 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4992   // TODO: This is slightly conservative for invoke instruction since exiting
4993   // via an exception *is* normal control for them.
4994   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4995     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4996       return false;
4997   return true;
4998 }
4999 
5000 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5001                                                   const Loop *L) {
5002   // The loop header is guaranteed to be executed for every iteration.
5003   //
5004   // FIXME: Relax this constraint to cover all basic blocks that are
5005   // guaranteed to be executed at every iteration.
5006   if (I->getParent() != L->getHeader()) return false;
5007 
5008   for (const Instruction &LI : *L->getHeader()) {
5009     if (&LI == I) return true;
5010     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5011   }
5012   llvm_unreachable("Instruction not contained in its own parent basic block.");
5013 }
5014 
5015 bool llvm::propagatesPoison(const Operator *I) {
5016   switch (I->getOpcode()) {
5017   case Instruction::Freeze:
5018   case Instruction::Select:
5019   case Instruction::PHI:
5020   case Instruction::Call:
5021   case Instruction::Invoke:
5022     return false;
5023   case Instruction::ICmp:
5024   case Instruction::FCmp:
5025   case Instruction::GetElementPtr:
5026     return true;
5027   default:
5028     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5029       return true;
5030 
5031     // Be conservative and return false.
5032     return false;
5033   }
5034 }
5035 
5036 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5037                                      SmallPtrSetImpl<const Value *> &Operands) {
5038   switch (I->getOpcode()) {
5039     case Instruction::Store:
5040       Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5041       break;
5042 
5043     case Instruction::Load:
5044       Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5045       break;
5046 
5047     case Instruction::AtomicCmpXchg:
5048       Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5049       break;
5050 
5051     case Instruction::AtomicRMW:
5052       Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5053       break;
5054 
5055     case Instruction::UDiv:
5056     case Instruction::SDiv:
5057     case Instruction::URem:
5058     case Instruction::SRem:
5059       Operands.insert(I->getOperand(1));
5060       break;
5061 
5062     case Instruction::Call:
5063     case Instruction::Invoke: {
5064       const CallBase *CB = cast<CallBase>(I);
5065       if (CB->isIndirectCall())
5066         Operands.insert(CB->getCalledOperand());
5067       for (unsigned i = 0; i < CB->arg_size(); ++i) {
5068         if (CB->paramHasAttr(i, Attribute::NoUndef))
5069           Operands.insert(CB->getArgOperand(i));
5070       }
5071       break;
5072     }
5073 
5074     default:
5075       break;
5076   }
5077 }
5078 
5079 bool llvm::mustTriggerUB(const Instruction *I,
5080                          const SmallSet<const Value *, 16>& KnownPoison) {
5081   SmallPtrSet<const Value *, 4> NonPoisonOps;
5082   getGuaranteedNonPoisonOps(I, NonPoisonOps);
5083 
5084   for (const auto *V : NonPoisonOps)
5085     if (KnownPoison.count(V))
5086       return true;
5087 
5088   return false;
5089 }
5090 
5091 static bool programUndefinedIfUndefOrPoison(const Value *V,
5092                                             bool PoisonOnly) {
5093   // We currently only look for uses of values within the same basic
5094   // block, as that makes it easier to guarantee that the uses will be
5095   // executed given that Inst is executed.
5096   //
5097   // FIXME: Expand this to consider uses beyond the same basic block. To do
5098   // this, look out for the distinction between post-dominance and strong
5099   // post-dominance.
5100   const BasicBlock *BB = nullptr;
5101   BasicBlock::const_iterator Begin;
5102   if (const auto *Inst = dyn_cast<Instruction>(V)) {
5103     BB = Inst->getParent();
5104     Begin = Inst->getIterator();
5105     Begin++;
5106   } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5107     BB = &Arg->getParent()->getEntryBlock();
5108     Begin = BB->begin();
5109   } else {
5110     return false;
5111   }
5112 
5113   BasicBlock::const_iterator End = BB->end();
5114 
5115   if (!PoisonOnly) {
5116     // Be conservative & just check whether a value is passed to a noundef
5117     // argument.
5118     // Instructions that raise UB with a poison operand are well-defined
5119     // or have unclear semantics when the input is partially undef.
5120     // For example, 'udiv x, (undef | 1)' isn't UB.
5121 
5122     for (auto &I : make_range(Begin, End)) {
5123       if (const auto *CB = dyn_cast<CallBase>(&I)) {
5124         for (unsigned i = 0; i < CB->arg_size(); ++i) {
5125           if (CB->paramHasAttr(i, Attribute::NoUndef) &&
5126               CB->getArgOperand(i) == V)
5127             return true;
5128         }
5129       }
5130       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5131         break;
5132     }
5133     return false;
5134   }
5135 
5136   // Set of instructions that we have proved will yield poison if Inst
5137   // does.
5138   SmallSet<const Value *, 16> YieldsPoison;
5139   SmallSet<const BasicBlock *, 4> Visited;
5140 
5141   YieldsPoison.insert(V);
5142   auto Propagate = [&](const User *User) {
5143     if (propagatesPoison(cast<Operator>(User)))
5144       YieldsPoison.insert(User);
5145   };
5146   for_each(V->users(), Propagate);
5147   Visited.insert(BB);
5148 
5149   unsigned Iter = 0;
5150   while (Iter++ < MaxAnalysisRecursionDepth) {
5151     for (auto &I : make_range(Begin, End)) {
5152       if (mustTriggerUB(&I, YieldsPoison))
5153         return true;
5154       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5155         return false;
5156 
5157       // Mark poison that propagates from I through uses of I.
5158       if (YieldsPoison.count(&I))
5159         for_each(I.users(), Propagate);
5160     }
5161 
5162     if (auto *NextBB = BB->getSingleSuccessor()) {
5163       if (Visited.insert(NextBB).second) {
5164         BB = NextBB;
5165         Begin = BB->getFirstNonPHI()->getIterator();
5166         End = BB->end();
5167         continue;
5168       }
5169     }
5170 
5171     break;
5172   }
5173   return false;
5174 }
5175 
5176 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5177   return ::programUndefinedIfUndefOrPoison(Inst, false);
5178 }
5179 
5180 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5181   return ::programUndefinedIfUndefOrPoison(Inst, true);
5182 }
5183 
5184 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5185   if (FMF.noNaNs())
5186     return true;
5187 
5188   if (auto *C = dyn_cast<ConstantFP>(V))
5189     return !C->isNaN();
5190 
5191   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5192     if (!C->getElementType()->isFloatingPointTy())
5193       return false;
5194     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5195       if (C->getElementAsAPFloat(I).isNaN())
5196         return false;
5197     }
5198     return true;
5199   }
5200 
5201   if (isa<ConstantAggregateZero>(V))
5202     return true;
5203 
5204   return false;
5205 }
5206 
5207 static bool isKnownNonZero(const Value *V) {
5208   if (auto *C = dyn_cast<ConstantFP>(V))
5209     return !C->isZero();
5210 
5211   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5212     if (!C->getElementType()->isFloatingPointTy())
5213       return false;
5214     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5215       if (C->getElementAsAPFloat(I).isZero())
5216         return false;
5217     }
5218     return true;
5219   }
5220 
5221   return false;
5222 }
5223 
5224 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5225 /// Given non-min/max outer cmp/select from the clamp pattern this
5226 /// function recognizes if it can be substitued by a "canonical" min/max
5227 /// pattern.
5228 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5229                                                Value *CmpLHS, Value *CmpRHS,
5230                                                Value *TrueVal, Value *FalseVal,
5231                                                Value *&LHS, Value *&RHS) {
5232   // Try to match
5233   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5234   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5235   // and return description of the outer Max/Min.
5236 
5237   // First, check if select has inverse order:
5238   if (CmpRHS == FalseVal) {
5239     std::swap(TrueVal, FalseVal);
5240     Pred = CmpInst::getInversePredicate(Pred);
5241   }
5242 
5243   // Assume success now. If there's no match, callers should not use these anyway.
5244   LHS = TrueVal;
5245   RHS = FalseVal;
5246 
5247   const APFloat *FC1;
5248   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5249     return {SPF_UNKNOWN, SPNB_NA, false};
5250 
5251   const APFloat *FC2;
5252   switch (Pred) {
5253   case CmpInst::FCMP_OLT:
5254   case CmpInst::FCMP_OLE:
5255   case CmpInst::FCMP_ULT:
5256   case CmpInst::FCMP_ULE:
5257     if (match(FalseVal,
5258               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5259                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5260         *FC1 < *FC2)
5261       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5262     break;
5263   case CmpInst::FCMP_OGT:
5264   case CmpInst::FCMP_OGE:
5265   case CmpInst::FCMP_UGT:
5266   case CmpInst::FCMP_UGE:
5267     if (match(FalseVal,
5268               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5269                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5270         *FC1 > *FC2)
5271       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5272     break;
5273   default:
5274     break;
5275   }
5276 
5277   return {SPF_UNKNOWN, SPNB_NA, false};
5278 }
5279 
5280 /// Recognize variations of:
5281 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5282 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5283                                       Value *CmpLHS, Value *CmpRHS,
5284                                       Value *TrueVal, Value *FalseVal) {
5285   // Swap the select operands and predicate to match the patterns below.
5286   if (CmpRHS != TrueVal) {
5287     Pred = ICmpInst::getSwappedPredicate(Pred);
5288     std::swap(TrueVal, FalseVal);
5289   }
5290   const APInt *C1;
5291   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5292     const APInt *C2;
5293     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5294     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5295         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5296       return {SPF_SMAX, SPNB_NA, false};
5297 
5298     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5299     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5300         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5301       return {SPF_SMIN, SPNB_NA, false};
5302 
5303     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5304     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5305         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5306       return {SPF_UMAX, SPNB_NA, false};
5307 
5308     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5309     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5310         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5311       return {SPF_UMIN, SPNB_NA, false};
5312   }
5313   return {SPF_UNKNOWN, SPNB_NA, false};
5314 }
5315 
5316 /// Recognize variations of:
5317 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5318 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5319                                                Value *CmpLHS, Value *CmpRHS,
5320                                                Value *TVal, Value *FVal,
5321                                                unsigned Depth) {
5322   // TODO: Allow FP min/max with nnan/nsz.
5323   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5324 
5325   Value *A = nullptr, *B = nullptr;
5326   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5327   if (!SelectPatternResult::isMinOrMax(L.Flavor))
5328     return {SPF_UNKNOWN, SPNB_NA, false};
5329 
5330   Value *C = nullptr, *D = nullptr;
5331   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5332   if (L.Flavor != R.Flavor)
5333     return {SPF_UNKNOWN, SPNB_NA, false};
5334 
5335   // We have something like: x Pred y ? min(a, b) : min(c, d).
5336   // Try to match the compare to the min/max operations of the select operands.
5337   // First, make sure we have the right compare predicate.
5338   switch (L.Flavor) {
5339   case SPF_SMIN:
5340     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5341       Pred = ICmpInst::getSwappedPredicate(Pred);
5342       std::swap(CmpLHS, CmpRHS);
5343     }
5344     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5345       break;
5346     return {SPF_UNKNOWN, SPNB_NA, false};
5347   case SPF_SMAX:
5348     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5349       Pred = ICmpInst::getSwappedPredicate(Pred);
5350       std::swap(CmpLHS, CmpRHS);
5351     }
5352     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5353       break;
5354     return {SPF_UNKNOWN, SPNB_NA, false};
5355   case SPF_UMIN:
5356     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5357       Pred = ICmpInst::getSwappedPredicate(Pred);
5358       std::swap(CmpLHS, CmpRHS);
5359     }
5360     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5361       break;
5362     return {SPF_UNKNOWN, SPNB_NA, false};
5363   case SPF_UMAX:
5364     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5365       Pred = ICmpInst::getSwappedPredicate(Pred);
5366       std::swap(CmpLHS, CmpRHS);
5367     }
5368     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5369       break;
5370     return {SPF_UNKNOWN, SPNB_NA, false};
5371   default:
5372     return {SPF_UNKNOWN, SPNB_NA, false};
5373   }
5374 
5375   // If there is a common operand in the already matched min/max and the other
5376   // min/max operands match the compare operands (either directly or inverted),
5377   // then this is min/max of the same flavor.
5378 
5379   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5380   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5381   if (D == B) {
5382     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5383                                          match(A, m_Not(m_Specific(CmpRHS)))))
5384       return {L.Flavor, SPNB_NA, false};
5385   }
5386   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5387   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5388   if (C == B) {
5389     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5390                                          match(A, m_Not(m_Specific(CmpRHS)))))
5391       return {L.Flavor, SPNB_NA, false};
5392   }
5393   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5394   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5395   if (D == A) {
5396     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5397                                          match(B, m_Not(m_Specific(CmpRHS)))))
5398       return {L.Flavor, SPNB_NA, false};
5399   }
5400   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5401   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5402   if (C == A) {
5403     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5404                                          match(B, m_Not(m_Specific(CmpRHS)))))
5405       return {L.Flavor, SPNB_NA, false};
5406   }
5407 
5408   return {SPF_UNKNOWN, SPNB_NA, false};
5409 }
5410 
5411 /// If the input value is the result of a 'not' op, constant integer, or vector
5412 /// splat of a constant integer, return the bitwise-not source value.
5413 /// TODO: This could be extended to handle non-splat vector integer constants.
5414 static Value *getNotValue(Value *V) {
5415   Value *NotV;
5416   if (match(V, m_Not(m_Value(NotV))))
5417     return NotV;
5418 
5419   const APInt *C;
5420   if (match(V, m_APInt(C)))
5421     return ConstantInt::get(V->getType(), ~(*C));
5422 
5423   return nullptr;
5424 }
5425 
5426 /// Match non-obvious integer minimum and maximum sequences.
5427 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5428                                        Value *CmpLHS, Value *CmpRHS,
5429                                        Value *TrueVal, Value *FalseVal,
5430                                        Value *&LHS, Value *&RHS,
5431                                        unsigned Depth) {
5432   // Assume success. If there's no match, callers should not use these anyway.
5433   LHS = TrueVal;
5434   RHS = FalseVal;
5435 
5436   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5437   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5438     return SPR;
5439 
5440   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5441   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5442     return SPR;
5443 
5444   // Look through 'not' ops to find disguised min/max.
5445   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5446   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5447   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5448     switch (Pred) {
5449     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5450     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5451     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5452     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5453     default: break;
5454     }
5455   }
5456 
5457   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5458   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5459   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5460     switch (Pred) {
5461     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5462     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5463     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5464     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5465     default: break;
5466     }
5467   }
5468 
5469   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5470     return {SPF_UNKNOWN, SPNB_NA, false};
5471 
5472   // Z = X -nsw Y
5473   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5474   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5475   if (match(TrueVal, m_Zero()) &&
5476       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5477     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5478 
5479   // Z = X -nsw Y
5480   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5481   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5482   if (match(FalseVal, m_Zero()) &&
5483       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5484     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5485 
5486   const APInt *C1;
5487   if (!match(CmpRHS, m_APInt(C1)))
5488     return {SPF_UNKNOWN, SPNB_NA, false};
5489 
5490   // An unsigned min/max can be written with a signed compare.
5491   const APInt *C2;
5492   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5493       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5494     // Is the sign bit set?
5495     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5496     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5497     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5498         C2->isMaxSignedValue())
5499       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5500 
5501     // Is the sign bit clear?
5502     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5503     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5504     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5505         C2->isMinSignedValue())
5506       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5507   }
5508 
5509   return {SPF_UNKNOWN, SPNB_NA, false};
5510 }
5511 
5512 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5513   assert(X && Y && "Invalid operand");
5514 
5515   // X = sub (0, Y) || X = sub nsw (0, Y)
5516   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5517       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5518     return true;
5519 
5520   // Y = sub (0, X) || Y = sub nsw (0, X)
5521   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5522       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5523     return true;
5524 
5525   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5526   Value *A, *B;
5527   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5528                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5529          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5530                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5531 }
5532 
5533 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5534                                               FastMathFlags FMF,
5535                                               Value *CmpLHS, Value *CmpRHS,
5536                                               Value *TrueVal, Value *FalseVal,
5537                                               Value *&LHS, Value *&RHS,
5538                                               unsigned Depth) {
5539   if (CmpInst::isFPPredicate(Pred)) {
5540     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5541     // 0.0 operand, set the compare's 0.0 operands to that same value for the
5542     // purpose of identifying min/max. Disregard vector constants with undefined
5543     // elements because those can not be back-propagated for analysis.
5544     Value *OutputZeroVal = nullptr;
5545     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5546         !cast<Constant>(TrueVal)->containsUndefElement())
5547       OutputZeroVal = TrueVal;
5548     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5549              !cast<Constant>(FalseVal)->containsUndefElement())
5550       OutputZeroVal = FalseVal;
5551 
5552     if (OutputZeroVal) {
5553       if (match(CmpLHS, m_AnyZeroFP()))
5554         CmpLHS = OutputZeroVal;
5555       if (match(CmpRHS, m_AnyZeroFP()))
5556         CmpRHS = OutputZeroVal;
5557     }
5558   }
5559 
5560   LHS = CmpLHS;
5561   RHS = CmpRHS;
5562 
5563   // Signed zero may return inconsistent results between implementations.
5564   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5565   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5566   // Therefore, we behave conservatively and only proceed if at least one of the
5567   // operands is known to not be zero or if we don't care about signed zero.
5568   switch (Pred) {
5569   default: break;
5570   // FIXME: Include OGT/OLT/UGT/ULT.
5571   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5572   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5573     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5574         !isKnownNonZero(CmpRHS))
5575       return {SPF_UNKNOWN, SPNB_NA, false};
5576   }
5577 
5578   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5579   bool Ordered = false;
5580 
5581   // When given one NaN and one non-NaN input:
5582   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5583   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5584   //     ordered comparison fails), which could be NaN or non-NaN.
5585   // so here we discover exactly what NaN behavior is required/accepted.
5586   if (CmpInst::isFPPredicate(Pred)) {
5587     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5588     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5589 
5590     if (LHSSafe && RHSSafe) {
5591       // Both operands are known non-NaN.
5592       NaNBehavior = SPNB_RETURNS_ANY;
5593     } else if (CmpInst::isOrdered(Pred)) {
5594       // An ordered comparison will return false when given a NaN, so it
5595       // returns the RHS.
5596       Ordered = true;
5597       if (LHSSafe)
5598         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5599         NaNBehavior = SPNB_RETURNS_NAN;
5600       else if (RHSSafe)
5601         NaNBehavior = SPNB_RETURNS_OTHER;
5602       else
5603         // Completely unsafe.
5604         return {SPF_UNKNOWN, SPNB_NA, false};
5605     } else {
5606       Ordered = false;
5607       // An unordered comparison will return true when given a NaN, so it
5608       // returns the LHS.
5609       if (LHSSafe)
5610         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5611         NaNBehavior = SPNB_RETURNS_OTHER;
5612       else if (RHSSafe)
5613         NaNBehavior = SPNB_RETURNS_NAN;
5614       else
5615         // Completely unsafe.
5616         return {SPF_UNKNOWN, SPNB_NA, false};
5617     }
5618   }
5619 
5620   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5621     std::swap(CmpLHS, CmpRHS);
5622     Pred = CmpInst::getSwappedPredicate(Pred);
5623     if (NaNBehavior == SPNB_RETURNS_NAN)
5624       NaNBehavior = SPNB_RETURNS_OTHER;
5625     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5626       NaNBehavior = SPNB_RETURNS_NAN;
5627     Ordered = !Ordered;
5628   }
5629 
5630   // ([if]cmp X, Y) ? X : Y
5631   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5632     switch (Pred) {
5633     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5634     case ICmpInst::ICMP_UGT:
5635     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5636     case ICmpInst::ICMP_SGT:
5637     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5638     case ICmpInst::ICMP_ULT:
5639     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5640     case ICmpInst::ICMP_SLT:
5641     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5642     case FCmpInst::FCMP_UGT:
5643     case FCmpInst::FCMP_UGE:
5644     case FCmpInst::FCMP_OGT:
5645     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5646     case FCmpInst::FCMP_ULT:
5647     case FCmpInst::FCMP_ULE:
5648     case FCmpInst::FCMP_OLT:
5649     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5650     }
5651   }
5652 
5653   if (isKnownNegation(TrueVal, FalseVal)) {
5654     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5655     // match against either LHS or sext(LHS).
5656     auto MaybeSExtCmpLHS =
5657         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5658     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5659     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5660     if (match(TrueVal, MaybeSExtCmpLHS)) {
5661       // Set the return values. If the compare uses the negated value (-X >s 0),
5662       // swap the return values because the negated value is always 'RHS'.
5663       LHS = TrueVal;
5664       RHS = FalseVal;
5665       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5666         std::swap(LHS, RHS);
5667 
5668       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5669       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5670       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5671         return {SPF_ABS, SPNB_NA, false};
5672 
5673       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5674       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5675         return {SPF_ABS, SPNB_NA, false};
5676 
5677       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5678       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5679       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5680         return {SPF_NABS, SPNB_NA, false};
5681     }
5682     else if (match(FalseVal, MaybeSExtCmpLHS)) {
5683       // Set the return values. If the compare uses the negated value (-X >s 0),
5684       // swap the return values because the negated value is always 'RHS'.
5685       LHS = FalseVal;
5686       RHS = TrueVal;
5687       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5688         std::swap(LHS, RHS);
5689 
5690       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5691       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5692       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5693         return {SPF_NABS, SPNB_NA, false};
5694 
5695       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5696       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5697       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5698         return {SPF_ABS, SPNB_NA, false};
5699     }
5700   }
5701 
5702   if (CmpInst::isIntPredicate(Pred))
5703     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5704 
5705   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5706   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5707   // semantics than minNum. Be conservative in such case.
5708   if (NaNBehavior != SPNB_RETURNS_ANY ||
5709       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5710        !isKnownNonZero(CmpRHS)))
5711     return {SPF_UNKNOWN, SPNB_NA, false};
5712 
5713   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5714 }
5715 
5716 /// Helps to match a select pattern in case of a type mismatch.
5717 ///
5718 /// The function processes the case when type of true and false values of a
5719 /// select instruction differs from type of the cmp instruction operands because
5720 /// of a cast instruction. The function checks if it is legal to move the cast
5721 /// operation after "select". If yes, it returns the new second value of
5722 /// "select" (with the assumption that cast is moved):
5723 /// 1. As operand of cast instruction when both values of "select" are same cast
5724 /// instructions.
5725 /// 2. As restored constant (by applying reverse cast operation) when the first
5726 /// value of the "select" is a cast operation and the second value is a
5727 /// constant.
5728 /// NOTE: We return only the new second value because the first value could be
5729 /// accessed as operand of cast instruction.
5730 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5731                               Instruction::CastOps *CastOp) {
5732   auto *Cast1 = dyn_cast<CastInst>(V1);
5733   if (!Cast1)
5734     return nullptr;
5735 
5736   *CastOp = Cast1->getOpcode();
5737   Type *SrcTy = Cast1->getSrcTy();
5738   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5739     // If V1 and V2 are both the same cast from the same type, look through V1.
5740     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5741       return Cast2->getOperand(0);
5742     return nullptr;
5743   }
5744 
5745   auto *C = dyn_cast<Constant>(V2);
5746   if (!C)
5747     return nullptr;
5748 
5749   Constant *CastedTo = nullptr;
5750   switch (*CastOp) {
5751   case Instruction::ZExt:
5752     if (CmpI->isUnsigned())
5753       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5754     break;
5755   case Instruction::SExt:
5756     if (CmpI->isSigned())
5757       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5758     break;
5759   case Instruction::Trunc:
5760     Constant *CmpConst;
5761     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5762         CmpConst->getType() == SrcTy) {
5763       // Here we have the following case:
5764       //
5765       //   %cond = cmp iN %x, CmpConst
5766       //   %tr = trunc iN %x to iK
5767       //   %narrowsel = select i1 %cond, iK %t, iK C
5768       //
5769       // We can always move trunc after select operation:
5770       //
5771       //   %cond = cmp iN %x, CmpConst
5772       //   %widesel = select i1 %cond, iN %x, iN CmpConst
5773       //   %tr = trunc iN %widesel to iK
5774       //
5775       // Note that C could be extended in any way because we don't care about
5776       // upper bits after truncation. It can't be abs pattern, because it would
5777       // look like:
5778       //
5779       //   select i1 %cond, x, -x.
5780       //
5781       // So only min/max pattern could be matched. Such match requires widened C
5782       // == CmpConst. That is why set widened C = CmpConst, condition trunc
5783       // CmpConst == C is checked below.
5784       CastedTo = CmpConst;
5785     } else {
5786       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5787     }
5788     break;
5789   case Instruction::FPTrunc:
5790     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5791     break;
5792   case Instruction::FPExt:
5793     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5794     break;
5795   case Instruction::FPToUI:
5796     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5797     break;
5798   case Instruction::FPToSI:
5799     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5800     break;
5801   case Instruction::UIToFP:
5802     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5803     break;
5804   case Instruction::SIToFP:
5805     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5806     break;
5807   default:
5808     break;
5809   }
5810 
5811   if (!CastedTo)
5812     return nullptr;
5813 
5814   // Make sure the cast doesn't lose any information.
5815   Constant *CastedBack =
5816       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5817   if (CastedBack != C)
5818     return nullptr;
5819 
5820   return CastedTo;
5821 }
5822 
5823 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5824                                              Instruction::CastOps *CastOp,
5825                                              unsigned Depth) {
5826   if (Depth >= MaxAnalysisRecursionDepth)
5827     return {SPF_UNKNOWN, SPNB_NA, false};
5828 
5829   SelectInst *SI = dyn_cast<SelectInst>(V);
5830   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5831 
5832   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5833   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5834 
5835   Value *TrueVal = SI->getTrueValue();
5836   Value *FalseVal = SI->getFalseValue();
5837 
5838   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5839                                             CastOp, Depth);
5840 }
5841 
5842 SelectPatternResult llvm::matchDecomposedSelectPattern(
5843     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5844     Instruction::CastOps *CastOp, unsigned Depth) {
5845   CmpInst::Predicate Pred = CmpI->getPredicate();
5846   Value *CmpLHS = CmpI->getOperand(0);
5847   Value *CmpRHS = CmpI->getOperand(1);
5848   FastMathFlags FMF;
5849   if (isa<FPMathOperator>(CmpI))
5850     FMF = CmpI->getFastMathFlags();
5851 
5852   // Bail out early.
5853   if (CmpI->isEquality())
5854     return {SPF_UNKNOWN, SPNB_NA, false};
5855 
5856   // Deal with type mismatches.
5857   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5858     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5859       // If this is a potential fmin/fmax with a cast to integer, then ignore
5860       // -0.0 because there is no corresponding integer value.
5861       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5862         FMF.setNoSignedZeros();
5863       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5864                                   cast<CastInst>(TrueVal)->getOperand(0), C,
5865                                   LHS, RHS, Depth);
5866     }
5867     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5868       // If this is a potential fmin/fmax with a cast to integer, then ignore
5869       // -0.0 because there is no corresponding integer value.
5870       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5871         FMF.setNoSignedZeros();
5872       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5873                                   C, cast<CastInst>(FalseVal)->getOperand(0),
5874                                   LHS, RHS, Depth);
5875     }
5876   }
5877   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5878                               LHS, RHS, Depth);
5879 }
5880 
5881 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5882   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5883   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5884   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5885   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5886   if (SPF == SPF_FMINNUM)
5887     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5888   if (SPF == SPF_FMAXNUM)
5889     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5890   llvm_unreachable("unhandled!");
5891 }
5892 
5893 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5894   if (SPF == SPF_SMIN) return SPF_SMAX;
5895   if (SPF == SPF_UMIN) return SPF_UMAX;
5896   if (SPF == SPF_SMAX) return SPF_SMIN;
5897   if (SPF == SPF_UMAX) return SPF_UMIN;
5898   llvm_unreachable("unhandled!");
5899 }
5900 
5901 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5902   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5903 }
5904 
5905 std::pair<Intrinsic::ID, bool>
5906 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
5907   // Check if VL contains select instructions that can be folded into a min/max
5908   // vector intrinsic and return the intrinsic if it is possible.
5909   // TODO: Support floating point min/max.
5910   bool AllCmpSingleUse = true;
5911   SelectPatternResult SelectPattern;
5912   SelectPattern.Flavor = SPF_UNKNOWN;
5913   if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
5914         Value *LHS, *RHS;
5915         auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
5916         if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
5917             CurrentPattern.Flavor == SPF_FMINNUM ||
5918             CurrentPattern.Flavor == SPF_FMAXNUM ||
5919             !I->getType()->isIntOrIntVectorTy())
5920           return false;
5921         if (SelectPattern.Flavor != SPF_UNKNOWN &&
5922             SelectPattern.Flavor != CurrentPattern.Flavor)
5923           return false;
5924         SelectPattern = CurrentPattern;
5925         AllCmpSingleUse &=
5926             match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
5927         return true;
5928       })) {
5929     switch (SelectPattern.Flavor) {
5930     case SPF_SMIN:
5931       return {Intrinsic::smin, AllCmpSingleUse};
5932     case SPF_UMIN:
5933       return {Intrinsic::umin, AllCmpSingleUse};
5934     case SPF_SMAX:
5935       return {Intrinsic::smax, AllCmpSingleUse};
5936     case SPF_UMAX:
5937       return {Intrinsic::umax, AllCmpSingleUse};
5938     default:
5939       llvm_unreachable("unexpected select pattern flavor");
5940     }
5941   }
5942   return {Intrinsic::not_intrinsic, false};
5943 }
5944 
5945 /// Return true if "icmp Pred LHS RHS" is always true.
5946 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5947                             const Value *RHS, const DataLayout &DL,
5948                             unsigned Depth) {
5949   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5950   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5951     return true;
5952 
5953   switch (Pred) {
5954   default:
5955     return false;
5956 
5957   case CmpInst::ICMP_SLE: {
5958     const APInt *C;
5959 
5960     // LHS s<= LHS +_{nsw} C   if C >= 0
5961     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5962       return !C->isNegative();
5963     return false;
5964   }
5965 
5966   case CmpInst::ICMP_ULE: {
5967     const APInt *C;
5968 
5969     // LHS u<= LHS +_{nuw} C   for any C
5970     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5971       return true;
5972 
5973     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5974     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5975                                        const Value *&X,
5976                                        const APInt *&CA, const APInt *&CB) {
5977       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
5978           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
5979         return true;
5980 
5981       // If X & C == 0 then (X | C) == X +_{nuw} C
5982       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
5983           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
5984         KnownBits Known(CA->getBitWidth());
5985         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
5986                          /*CxtI*/ nullptr, /*DT*/ nullptr);
5987         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
5988           return true;
5989       }
5990 
5991       return false;
5992     };
5993 
5994     const Value *X;
5995     const APInt *CLHS, *CRHS;
5996     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
5997       return CLHS->ule(*CRHS);
5998 
5999     return false;
6000   }
6001   }
6002 }
6003 
6004 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6005 /// ALHS ARHS" is true.  Otherwise, return None.
6006 static Optional<bool>
6007 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6008                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
6009                       const DataLayout &DL, unsigned Depth) {
6010   switch (Pred) {
6011   default:
6012     return None;
6013 
6014   case CmpInst::ICMP_SLT:
6015   case CmpInst::ICMP_SLE:
6016     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6017         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6018       return true;
6019     return None;
6020 
6021   case CmpInst::ICMP_ULT:
6022   case CmpInst::ICMP_ULE:
6023     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6024         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6025       return true;
6026     return None;
6027   }
6028 }
6029 
6030 /// Return true if the operands of the two compares match.  IsSwappedOps is true
6031 /// when the operands match, but are swapped.
6032 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6033                           const Value *BLHS, const Value *BRHS,
6034                           bool &IsSwappedOps) {
6035 
6036   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6037   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6038   return IsMatchingOps || IsSwappedOps;
6039 }
6040 
6041 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6042 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6043 /// Otherwise, return None if we can't infer anything.
6044 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6045                                                     CmpInst::Predicate BPred,
6046                                                     bool AreSwappedOps) {
6047   // Canonicalize the predicate as if the operands were not commuted.
6048   if (AreSwappedOps)
6049     BPred = ICmpInst::getSwappedPredicate(BPred);
6050 
6051   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6052     return true;
6053   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6054     return false;
6055 
6056   return None;
6057 }
6058 
6059 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6060 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6061 /// Otherwise, return None if we can't infer anything.
6062 static Optional<bool>
6063 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6064                                  const ConstantInt *C1,
6065                                  CmpInst::Predicate BPred,
6066                                  const ConstantInt *C2) {
6067   ConstantRange DomCR =
6068       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6069   ConstantRange CR =
6070       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6071   ConstantRange Intersection = DomCR.intersectWith(CR);
6072   ConstantRange Difference = DomCR.difference(CR);
6073   if (Intersection.isEmptySet())
6074     return false;
6075   if (Difference.isEmptySet())
6076     return true;
6077   return None;
6078 }
6079 
6080 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6081 /// false.  Otherwise, return None if we can't infer anything.
6082 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6083                                          CmpInst::Predicate BPred,
6084                                          const Value *BLHS, const Value *BRHS,
6085                                          const DataLayout &DL, bool LHSIsTrue,
6086                                          unsigned Depth) {
6087   Value *ALHS = LHS->getOperand(0);
6088   Value *ARHS = LHS->getOperand(1);
6089 
6090   // The rest of the logic assumes the LHS condition is true.  If that's not the
6091   // case, invert the predicate to make it so.
6092   CmpInst::Predicate APred =
6093       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6094 
6095   // Can we infer anything when the two compares have matching operands?
6096   bool AreSwappedOps;
6097   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6098     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6099             APred, BPred, AreSwappedOps))
6100       return Implication;
6101     // No amount of additional analysis will infer the second condition, so
6102     // early exit.
6103     return None;
6104   }
6105 
6106   // Can we infer anything when the LHS operands match and the RHS operands are
6107   // constants (not necessarily matching)?
6108   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6109     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6110             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6111       return Implication;
6112     // No amount of additional analysis will infer the second condition, so
6113     // early exit.
6114     return None;
6115   }
6116 
6117   if (APred == BPred)
6118     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6119   return None;
6120 }
6121 
6122 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6123 /// false.  Otherwise, return None if we can't infer anything.  We expect the
6124 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
6125 static Optional<bool>
6126 isImpliedCondAndOr(const BinaryOperator *LHS, CmpInst::Predicate RHSPred,
6127                    const Value *RHSOp0, const Value *RHSOp1,
6128 
6129                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6130   // The LHS must be an 'or' or an 'and' instruction.
6131   assert((LHS->getOpcode() == Instruction::And ||
6132           LHS->getOpcode() == Instruction::Or) &&
6133          "Expected LHS to be 'and' or 'or'.");
6134 
6135   assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6136 
6137   // If the result of an 'or' is false, then we know both legs of the 'or' are
6138   // false.  Similarly, if the result of an 'and' is true, then we know both
6139   // legs of the 'and' are true.
6140   Value *ALHS, *ARHS;
6141   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
6142       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
6143     // FIXME: Make this non-recursion.
6144     if (Optional<bool> Implication = isImpliedCondition(
6145             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6146       return Implication;
6147     if (Optional<bool> Implication = isImpliedCondition(
6148             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6149       return Implication;
6150     return None;
6151   }
6152   return None;
6153 }
6154 
6155 Optional<bool>
6156 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6157                          const Value *RHSOp0, const Value *RHSOp1,
6158                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6159   // Bail out when we hit the limit.
6160   if (Depth == MaxAnalysisRecursionDepth)
6161     return None;
6162 
6163   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6164   // example.
6165   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6166     return None;
6167 
6168   Type *OpTy = LHS->getType();
6169   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6170 
6171   // FIXME: Extending the code below to handle vectors.
6172   if (OpTy->isVectorTy())
6173     return None;
6174 
6175   assert(OpTy->isIntegerTy(1) && "implied by above");
6176 
6177   // Both LHS and RHS are icmps.
6178   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6179   if (LHSCmp)
6180     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6181                               Depth);
6182 
6183   /// The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to
6184   /// be / an icmp. FIXME: Add support for and/or on the RHS.
6185   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
6186   if (LHSBO) {
6187     if ((LHSBO->getOpcode() == Instruction::And ||
6188          LHSBO->getOpcode() == Instruction::Or))
6189       return isImpliedCondAndOr(LHSBO, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6190                                 Depth);
6191   }
6192   return None;
6193 }
6194 
6195 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6196                                         const DataLayout &DL, bool LHSIsTrue,
6197                                         unsigned Depth) {
6198   // LHS ==> RHS by definition
6199   if (LHS == RHS)
6200     return LHSIsTrue;
6201 
6202   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6203   if (RHSCmp)
6204     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6205                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6206                               LHSIsTrue, Depth);
6207   return None;
6208 }
6209 
6210 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6211 // condition dominating ContextI or nullptr, if no condition is found.
6212 static std::pair<Value *, bool>
6213 getDomPredecessorCondition(const Instruction *ContextI) {
6214   if (!ContextI || !ContextI->getParent())
6215     return {nullptr, false};
6216 
6217   // TODO: This is a poor/cheap way to determine dominance. Should we use a
6218   // dominator tree (eg, from a SimplifyQuery) instead?
6219   const BasicBlock *ContextBB = ContextI->getParent();
6220   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6221   if (!PredBB)
6222     return {nullptr, false};
6223 
6224   // We need a conditional branch in the predecessor.
6225   Value *PredCond;
6226   BasicBlock *TrueBB, *FalseBB;
6227   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6228     return {nullptr, false};
6229 
6230   // The branch should get simplified. Don't bother simplifying this condition.
6231   if (TrueBB == FalseBB)
6232     return {nullptr, false};
6233 
6234   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6235          "Predecessor block does not point to successor?");
6236 
6237   // Is this condition implied by the predecessor condition?
6238   return {PredCond, TrueBB == ContextBB};
6239 }
6240 
6241 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6242                                              const Instruction *ContextI,
6243                                              const DataLayout &DL) {
6244   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6245   auto PredCond = getDomPredecessorCondition(ContextI);
6246   if (PredCond.first)
6247     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6248   return None;
6249 }
6250 
6251 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6252                                              const Value *LHS, const Value *RHS,
6253                                              const Instruction *ContextI,
6254                                              const DataLayout &DL) {
6255   auto PredCond = getDomPredecessorCondition(ContextI);
6256   if (PredCond.first)
6257     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6258                               PredCond.second);
6259   return None;
6260 }
6261 
6262 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6263                               APInt &Upper, const InstrInfoQuery &IIQ) {
6264   unsigned Width = Lower.getBitWidth();
6265   const APInt *C;
6266   switch (BO.getOpcode()) {
6267   case Instruction::Add:
6268     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6269       // FIXME: If we have both nuw and nsw, we should reduce the range further.
6270       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6271         // 'add nuw x, C' produces [C, UINT_MAX].
6272         Lower = *C;
6273       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6274         if (C->isNegative()) {
6275           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6276           Lower = APInt::getSignedMinValue(Width);
6277           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6278         } else {
6279           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6280           Lower = APInt::getSignedMinValue(Width) + *C;
6281           Upper = APInt::getSignedMaxValue(Width) + 1;
6282         }
6283       }
6284     }
6285     break;
6286 
6287   case Instruction::And:
6288     if (match(BO.getOperand(1), m_APInt(C)))
6289       // 'and x, C' produces [0, C].
6290       Upper = *C + 1;
6291     break;
6292 
6293   case Instruction::Or:
6294     if (match(BO.getOperand(1), m_APInt(C)))
6295       // 'or x, C' produces [C, UINT_MAX].
6296       Lower = *C;
6297     break;
6298 
6299   case Instruction::AShr:
6300     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6301       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6302       Lower = APInt::getSignedMinValue(Width).ashr(*C);
6303       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6304     } else if (match(BO.getOperand(0), m_APInt(C))) {
6305       unsigned ShiftAmount = Width - 1;
6306       if (!C->isNullValue() && IIQ.isExact(&BO))
6307         ShiftAmount = C->countTrailingZeros();
6308       if (C->isNegative()) {
6309         // 'ashr C, x' produces [C, C >> (Width-1)]
6310         Lower = *C;
6311         Upper = C->ashr(ShiftAmount) + 1;
6312       } else {
6313         // 'ashr C, x' produces [C >> (Width-1), C]
6314         Lower = C->ashr(ShiftAmount);
6315         Upper = *C + 1;
6316       }
6317     }
6318     break;
6319 
6320   case Instruction::LShr:
6321     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6322       // 'lshr x, C' produces [0, UINT_MAX >> C].
6323       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6324     } else if (match(BO.getOperand(0), m_APInt(C))) {
6325       // 'lshr C, x' produces [C >> (Width-1), C].
6326       unsigned ShiftAmount = Width - 1;
6327       if (!C->isNullValue() && IIQ.isExact(&BO))
6328         ShiftAmount = C->countTrailingZeros();
6329       Lower = C->lshr(ShiftAmount);
6330       Upper = *C + 1;
6331     }
6332     break;
6333 
6334   case Instruction::Shl:
6335     if (match(BO.getOperand(0), m_APInt(C))) {
6336       if (IIQ.hasNoUnsignedWrap(&BO)) {
6337         // 'shl nuw C, x' produces [C, C << CLZ(C)]
6338         Lower = *C;
6339         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6340       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6341         if (C->isNegative()) {
6342           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6343           unsigned ShiftAmount = C->countLeadingOnes() - 1;
6344           Lower = C->shl(ShiftAmount);
6345           Upper = *C + 1;
6346         } else {
6347           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6348           unsigned ShiftAmount = C->countLeadingZeros() - 1;
6349           Lower = *C;
6350           Upper = C->shl(ShiftAmount) + 1;
6351         }
6352       }
6353     }
6354     break;
6355 
6356   case Instruction::SDiv:
6357     if (match(BO.getOperand(1), m_APInt(C))) {
6358       APInt IntMin = APInt::getSignedMinValue(Width);
6359       APInt IntMax = APInt::getSignedMaxValue(Width);
6360       if (C->isAllOnesValue()) {
6361         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6362         //    where C != -1 and C != 0 and C != 1
6363         Lower = IntMin + 1;
6364         Upper = IntMax + 1;
6365       } else if (C->countLeadingZeros() < Width - 1) {
6366         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6367         //    where C != -1 and C != 0 and C != 1
6368         Lower = IntMin.sdiv(*C);
6369         Upper = IntMax.sdiv(*C);
6370         if (Lower.sgt(Upper))
6371           std::swap(Lower, Upper);
6372         Upper = Upper + 1;
6373         assert(Upper != Lower && "Upper part of range has wrapped!");
6374       }
6375     } else if (match(BO.getOperand(0), m_APInt(C))) {
6376       if (C->isMinSignedValue()) {
6377         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6378         Lower = *C;
6379         Upper = Lower.lshr(1) + 1;
6380       } else {
6381         // 'sdiv C, x' produces [-|C|, |C|].
6382         Upper = C->abs() + 1;
6383         Lower = (-Upper) + 1;
6384       }
6385     }
6386     break;
6387 
6388   case Instruction::UDiv:
6389     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6390       // 'udiv x, C' produces [0, UINT_MAX / C].
6391       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6392     } else if (match(BO.getOperand(0), m_APInt(C))) {
6393       // 'udiv C, x' produces [0, C].
6394       Upper = *C + 1;
6395     }
6396     break;
6397 
6398   case Instruction::SRem:
6399     if (match(BO.getOperand(1), m_APInt(C))) {
6400       // 'srem x, C' produces (-|C|, |C|).
6401       Upper = C->abs();
6402       Lower = (-Upper) + 1;
6403     }
6404     break;
6405 
6406   case Instruction::URem:
6407     if (match(BO.getOperand(1), m_APInt(C)))
6408       // 'urem x, C' produces [0, C).
6409       Upper = *C;
6410     break;
6411 
6412   default:
6413     break;
6414   }
6415 }
6416 
6417 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6418                                   APInt &Upper) {
6419   unsigned Width = Lower.getBitWidth();
6420   const APInt *C;
6421   switch (II.getIntrinsicID()) {
6422   case Intrinsic::ctpop:
6423   case Intrinsic::ctlz:
6424   case Intrinsic::cttz:
6425     // Maximum of set/clear bits is the bit width.
6426     assert(Lower == 0 && "Expected lower bound to be zero");
6427     Upper = Width + 1;
6428     break;
6429   case Intrinsic::uadd_sat:
6430     // uadd.sat(x, C) produces [C, UINT_MAX].
6431     if (match(II.getOperand(0), m_APInt(C)) ||
6432         match(II.getOperand(1), m_APInt(C)))
6433       Lower = *C;
6434     break;
6435   case Intrinsic::sadd_sat:
6436     if (match(II.getOperand(0), m_APInt(C)) ||
6437         match(II.getOperand(1), m_APInt(C))) {
6438       if (C->isNegative()) {
6439         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6440         Lower = APInt::getSignedMinValue(Width);
6441         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6442       } else {
6443         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6444         Lower = APInt::getSignedMinValue(Width) + *C;
6445         Upper = APInt::getSignedMaxValue(Width) + 1;
6446       }
6447     }
6448     break;
6449   case Intrinsic::usub_sat:
6450     // usub.sat(C, x) produces [0, C].
6451     if (match(II.getOperand(0), m_APInt(C)))
6452       Upper = *C + 1;
6453     // usub.sat(x, C) produces [0, UINT_MAX - C].
6454     else if (match(II.getOperand(1), m_APInt(C)))
6455       Upper = APInt::getMaxValue(Width) - *C + 1;
6456     break;
6457   case Intrinsic::ssub_sat:
6458     if (match(II.getOperand(0), m_APInt(C))) {
6459       if (C->isNegative()) {
6460         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6461         Lower = APInt::getSignedMinValue(Width);
6462         Upper = *C - APInt::getSignedMinValue(Width) + 1;
6463       } else {
6464         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6465         Lower = *C - APInt::getSignedMaxValue(Width);
6466         Upper = APInt::getSignedMaxValue(Width) + 1;
6467       }
6468     } else if (match(II.getOperand(1), m_APInt(C))) {
6469       if (C->isNegative()) {
6470         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6471         Lower = APInt::getSignedMinValue(Width) - *C;
6472         Upper = APInt::getSignedMaxValue(Width) + 1;
6473       } else {
6474         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6475         Lower = APInt::getSignedMinValue(Width);
6476         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6477       }
6478     }
6479     break;
6480   case Intrinsic::umin:
6481   case Intrinsic::umax:
6482   case Intrinsic::smin:
6483   case Intrinsic::smax:
6484     if (!match(II.getOperand(0), m_APInt(C)) &&
6485         !match(II.getOperand(1), m_APInt(C)))
6486       break;
6487 
6488     switch (II.getIntrinsicID()) {
6489     case Intrinsic::umin:
6490       Upper = *C + 1;
6491       break;
6492     case Intrinsic::umax:
6493       Lower = *C;
6494       break;
6495     case Intrinsic::smin:
6496       Lower = APInt::getSignedMinValue(Width);
6497       Upper = *C + 1;
6498       break;
6499     case Intrinsic::smax:
6500       Lower = *C;
6501       Upper = APInt::getSignedMaxValue(Width) + 1;
6502       break;
6503     default:
6504       llvm_unreachable("Must be min/max intrinsic");
6505     }
6506     break;
6507   case Intrinsic::abs:
6508     // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6509     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6510     if (match(II.getOperand(1), m_One()))
6511       Upper = APInt::getSignedMaxValue(Width) + 1;
6512     else
6513       Upper = APInt::getSignedMinValue(Width) + 1;
6514     break;
6515   default:
6516     break;
6517   }
6518 }
6519 
6520 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6521                                       APInt &Upper, const InstrInfoQuery &IIQ) {
6522   const Value *LHS = nullptr, *RHS = nullptr;
6523   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6524   if (R.Flavor == SPF_UNKNOWN)
6525     return;
6526 
6527   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6528 
6529   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6530     // If the negation part of the abs (in RHS) has the NSW flag,
6531     // then the result of abs(X) is [0..SIGNED_MAX],
6532     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6533     Lower = APInt::getNullValue(BitWidth);
6534     if (match(RHS, m_Neg(m_Specific(LHS))) &&
6535         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6536       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6537     else
6538       Upper = APInt::getSignedMinValue(BitWidth) + 1;
6539     return;
6540   }
6541 
6542   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6543     // The result of -abs(X) is <= 0.
6544     Lower = APInt::getSignedMinValue(BitWidth);
6545     Upper = APInt(BitWidth, 1);
6546     return;
6547   }
6548 
6549   const APInt *C;
6550   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6551     return;
6552 
6553   switch (R.Flavor) {
6554     case SPF_UMIN:
6555       Upper = *C + 1;
6556       break;
6557     case SPF_UMAX:
6558       Lower = *C;
6559       break;
6560     case SPF_SMIN:
6561       Lower = APInt::getSignedMinValue(BitWidth);
6562       Upper = *C + 1;
6563       break;
6564     case SPF_SMAX:
6565       Lower = *C;
6566       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6567       break;
6568     default:
6569       break;
6570   }
6571 }
6572 
6573 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6574                                          AssumptionCache *AC,
6575                                          const Instruction *CtxI,
6576                                          unsigned Depth) {
6577   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6578 
6579   if (Depth == MaxAnalysisRecursionDepth)
6580     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6581 
6582   const APInt *C;
6583   if (match(V, m_APInt(C)))
6584     return ConstantRange(*C);
6585 
6586   InstrInfoQuery IIQ(UseInstrInfo);
6587   unsigned BitWidth = V->getType()->getScalarSizeInBits();
6588   APInt Lower = APInt(BitWidth, 0);
6589   APInt Upper = APInt(BitWidth, 0);
6590   if (auto *BO = dyn_cast<BinaryOperator>(V))
6591     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6592   else if (auto *II = dyn_cast<IntrinsicInst>(V))
6593     setLimitsForIntrinsic(*II, Lower, Upper);
6594   else if (auto *SI = dyn_cast<SelectInst>(V))
6595     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6596 
6597   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6598 
6599   if (auto *I = dyn_cast<Instruction>(V))
6600     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6601       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6602 
6603   if (CtxI && AC) {
6604     // Try to restrict the range based on information from assumptions.
6605     for (auto &AssumeVH : AC->assumptionsFor(V)) {
6606       if (!AssumeVH)
6607         continue;
6608       CallInst *I = cast<CallInst>(AssumeVH);
6609       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6610              "Got assumption for the wrong function!");
6611       assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6612              "must be an assume intrinsic");
6613 
6614       if (!isValidAssumeForContext(I, CtxI, nullptr))
6615         continue;
6616       Value *Arg = I->getArgOperand(0);
6617       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6618       // Currently we just use information from comparisons.
6619       if (!Cmp || Cmp->getOperand(0) != V)
6620         continue;
6621       ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6622                                                AC, I, Depth + 1);
6623       CR = CR.intersectWith(
6624           ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6625     }
6626   }
6627 
6628   return CR;
6629 }
6630 
6631 static Optional<int64_t>
6632 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6633   // Skip over the first indices.
6634   gep_type_iterator GTI = gep_type_begin(GEP);
6635   for (unsigned i = 1; i != Idx; ++i, ++GTI)
6636     /*skip along*/;
6637 
6638   // Compute the offset implied by the rest of the indices.
6639   int64_t Offset = 0;
6640   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
6641     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
6642     if (!OpC)
6643       return None;
6644     if (OpC->isZero())
6645       continue; // No offset.
6646 
6647     // Handle struct indices, which add their field offset to the pointer.
6648     if (StructType *STy = GTI.getStructTypeOrNull()) {
6649       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
6650       continue;
6651     }
6652 
6653     // Otherwise, we have a sequential type like an array or fixed-length
6654     // vector. Multiply the index by the ElementSize.
6655     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
6656     if (Size.isScalable())
6657       return None;
6658     Offset += Size.getFixedSize() * OpC->getSExtValue();
6659   }
6660 
6661   return Offset;
6662 }
6663 
6664 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
6665                                         const DataLayout &DL) {
6666   Ptr1 = Ptr1->stripPointerCasts();
6667   Ptr2 = Ptr2->stripPointerCasts();
6668 
6669   // Handle the trivial case first.
6670   if (Ptr1 == Ptr2) {
6671     return 0;
6672   }
6673 
6674   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
6675   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
6676 
6677   // If one pointer is a GEP see if the GEP is a constant offset from the base,
6678   // as in "P" and "gep P, 1".
6679   // Also do this iteratively to handle the the following case:
6680   //   Ptr_t1 = GEP Ptr1, c1
6681   //   Ptr_t2 = GEP Ptr_t1, c2
6682   //   Ptr2 = GEP Ptr_t2, c3
6683   // where we will return c1+c2+c3.
6684   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
6685   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
6686   // are the same, and return the difference between offsets.
6687   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
6688                                  const Value *Ptr) -> Optional<int64_t> {
6689     const GEPOperator *GEP_T = GEP;
6690     int64_t OffsetVal = 0;
6691     bool HasSameBase = false;
6692     while (GEP_T) {
6693       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
6694       if (!Offset)
6695         return None;
6696       OffsetVal += *Offset;
6697       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
6698       if (Op0 == Ptr) {
6699         HasSameBase = true;
6700         break;
6701       }
6702       GEP_T = dyn_cast<GEPOperator>(Op0);
6703     }
6704     if (!HasSameBase)
6705       return None;
6706     return OffsetVal;
6707   };
6708 
6709   if (GEP1) {
6710     auto Offset = getOffsetFromBase(GEP1, Ptr2);
6711     if (Offset)
6712       return -*Offset;
6713   }
6714   if (GEP2) {
6715     auto Offset = getOffsetFromBase(GEP2, Ptr1);
6716     if (Offset)
6717       return Offset;
6718   }
6719 
6720   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
6721   // base.  After that base, they may have some number of common (and
6722   // potentially variable) indices.  After that they handle some constant
6723   // offset, which determines their offset from each other.  At this point, we
6724   // handle no other case.
6725   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
6726     return None;
6727 
6728   // Skip any common indices and track the GEP types.
6729   unsigned Idx = 1;
6730   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
6731     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
6732       break;
6733 
6734   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
6735   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
6736   if (!Offset1 || !Offset2)
6737     return None;
6738   return *Offset2 - *Offset1;
6739 }
6740