1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/GuardUtils.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/Loads.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/IntrinsicsAArch64.h"
55 #include "llvm/IR/IntrinsicsX86.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/Operator.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/User.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Compiler.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/KnownBits.h"
69 #include "llvm/Support/MathExtras.h"
70 #include <algorithm>
71 #include <array>
72 #include <cassert>
73 #include <cstdint>
74 #include <iterator>
75 #include <utility>
76 
77 using namespace llvm;
78 using namespace llvm::PatternMatch;
79 
80 // Controls the number of uses of the value searched for possible
81 // dominating comparisons.
82 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83                                               cl::Hidden, cl::init(20));
84 
85 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
86 /// returns the element type's bitwidth.
87 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
88   if (unsigned BitWidth = Ty->getScalarSizeInBits())
89     return BitWidth;
90 
91   return DL.getPointerTypeSizeInBits(Ty);
92 }
93 
94 namespace {
95 
96 // Simplifying using an assume can only be done in a particular control-flow
97 // context (the context instruction provides that context). If an assume and
98 // the context instruction are not in the same block then the DT helps in
99 // figuring out if we can use it.
100 struct Query {
101   const DataLayout &DL;
102   AssumptionCache *AC;
103   const Instruction *CxtI;
104   const DominatorTree *DT;
105 
106   // Unlike the other analyses, this may be a nullptr because not all clients
107   // provide it currently.
108   OptimizationRemarkEmitter *ORE;
109 
110   /// Set of assumptions that should be excluded from further queries.
111   /// This is because of the potential for mutual recursion to cause
112   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
113   /// classic case of this is assume(x = y), which will attempt to determine
114   /// bits in x from bits in y, which will attempt to determine bits in y from
115   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
116   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
117   /// (all of which can call computeKnownBits), and so on.
118   std::array<const Value *, MaxAnalysisRecursionDepth> Excluded;
119 
120   /// If true, it is safe to use metadata during simplification.
121   InstrInfoQuery IIQ;
122 
123   unsigned NumExcluded = 0;
124 
125   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
126         const DominatorTree *DT, bool UseInstrInfo,
127         OptimizationRemarkEmitter *ORE = nullptr)
128       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
129 
130   Query(const Query &Q, const Value *NewExcl)
131       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
132         NumExcluded(Q.NumExcluded) {
133     Excluded = Q.Excluded;
134     Excluded[NumExcluded++] = NewExcl;
135     assert(NumExcluded <= Excluded.size());
136   }
137 
138   bool isExcluded(const Value *Value) const {
139     if (NumExcluded == 0)
140       return false;
141     auto End = Excluded.begin() + NumExcluded;
142     return std::find(Excluded.begin(), End, Value) != End;
143   }
144 };
145 
146 } // end anonymous namespace
147 
148 // Given the provided Value and, potentially, a context instruction, return
149 // the preferred context instruction (if any).
150 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
151   // If we've been provided with a context instruction, then use that (provided
152   // it has been inserted).
153   if (CxtI && CxtI->getParent())
154     return CxtI;
155 
156   // If the value is really an already-inserted instruction, then use that.
157   CxtI = dyn_cast<Instruction>(V);
158   if (CxtI && CxtI->getParent())
159     return CxtI;
160 
161   return nullptr;
162 }
163 
164 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
165                                    const APInt &DemandedElts,
166                                    APInt &DemandedLHS, APInt &DemandedRHS) {
167   // The length of scalable vectors is unknown at compile time, thus we
168   // cannot check their values
169   if (isa<ScalableVectorType>(Shuf->getType()))
170     return false;
171 
172   int NumElts =
173       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
174   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
175   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
176   if (DemandedElts.isNullValue())
177     return true;
178   // Simple case of a shuffle with zeroinitializer.
179   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
180     DemandedLHS.setBit(0);
181     return true;
182   }
183   for (int i = 0; i != NumMaskElts; ++i) {
184     if (!DemandedElts[i])
185       continue;
186     int M = Shuf->getMaskValue(i);
187     assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
188 
189     // For undef elements, we don't know anything about the common state of
190     // the shuffle result.
191     if (M == -1)
192       return false;
193     if (M < NumElts)
194       DemandedLHS.setBit(M % NumElts);
195     else
196       DemandedRHS.setBit(M % NumElts);
197   }
198 
199   return true;
200 }
201 
202 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
203                              KnownBits &Known, unsigned Depth, const Query &Q);
204 
205 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
206                              const Query &Q) {
207   // FIXME: We currently have no way to represent the DemandedElts of a scalable
208   // vector
209   if (isa<ScalableVectorType>(V->getType())) {
210     Known.resetAll();
211     return;
212   }
213 
214   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
215   APInt DemandedElts =
216       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
217   computeKnownBits(V, DemandedElts, Known, Depth, Q);
218 }
219 
220 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
221                             const DataLayout &DL, unsigned Depth,
222                             AssumptionCache *AC, const Instruction *CxtI,
223                             const DominatorTree *DT,
224                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
225   ::computeKnownBits(V, Known, Depth,
226                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
227 }
228 
229 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
230                             KnownBits &Known, const DataLayout &DL,
231                             unsigned Depth, AssumptionCache *AC,
232                             const Instruction *CxtI, const DominatorTree *DT,
233                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
234   ::computeKnownBits(V, DemandedElts, Known, Depth,
235                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
236 }
237 
238 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
239                                   unsigned Depth, const Query &Q);
240 
241 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
242                                   const Query &Q);
243 
244 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
245                                  unsigned Depth, AssumptionCache *AC,
246                                  const Instruction *CxtI,
247                                  const DominatorTree *DT,
248                                  OptimizationRemarkEmitter *ORE,
249                                  bool UseInstrInfo) {
250   return ::computeKnownBits(
251       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
252 }
253 
254 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
255                                  const DataLayout &DL, unsigned Depth,
256                                  AssumptionCache *AC, const Instruction *CxtI,
257                                  const DominatorTree *DT,
258                                  OptimizationRemarkEmitter *ORE,
259                                  bool UseInstrInfo) {
260   return ::computeKnownBits(
261       V, DemandedElts, Depth,
262       Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
263 }
264 
265 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
266                                const DataLayout &DL, AssumptionCache *AC,
267                                const Instruction *CxtI, const DominatorTree *DT,
268                                bool UseInstrInfo) {
269   assert(LHS->getType() == RHS->getType() &&
270          "LHS and RHS should have the same type");
271   assert(LHS->getType()->isIntOrIntVectorTy() &&
272          "LHS and RHS should be integers");
273   // Look for an inverted mask: (X & ~M) op (Y & M).
274   Value *M;
275   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
276       match(RHS, m_c_And(m_Specific(M), m_Value())))
277     return true;
278   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
279       match(LHS, m_c_And(m_Specific(M), m_Value())))
280     return true;
281   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
282   KnownBits LHSKnown(IT->getBitWidth());
283   KnownBits RHSKnown(IT->getBitWidth());
284   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
285   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
286   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
287 }
288 
289 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
290   for (const User *U : CxtI->users()) {
291     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
292       if (IC->isEquality())
293         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
294           if (C->isNullValue())
295             continue;
296     return false;
297   }
298   return true;
299 }
300 
301 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
302                                    const Query &Q);
303 
304 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
305                                   bool OrZero, unsigned Depth,
306                                   AssumptionCache *AC, const Instruction *CxtI,
307                                   const DominatorTree *DT, bool UseInstrInfo) {
308   return ::isKnownToBeAPowerOfTwo(
309       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
310 }
311 
312 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
313                            unsigned Depth, const Query &Q);
314 
315 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
316 
317 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
318                           AssumptionCache *AC, const Instruction *CxtI,
319                           const DominatorTree *DT, bool UseInstrInfo) {
320   return ::isKnownNonZero(V, Depth,
321                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
322 }
323 
324 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
325                               unsigned Depth, AssumptionCache *AC,
326                               const Instruction *CxtI, const DominatorTree *DT,
327                               bool UseInstrInfo) {
328   KnownBits Known =
329       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
330   return Known.isNonNegative();
331 }
332 
333 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
334                            AssumptionCache *AC, const Instruction *CxtI,
335                            const DominatorTree *DT, bool UseInstrInfo) {
336   if (auto *CI = dyn_cast<ConstantInt>(V))
337     return CI->getValue().isStrictlyPositive();
338 
339   // TODO: We'd doing two recursive queries here.  We should factor this such
340   // that only a single query is needed.
341   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
342          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
343 }
344 
345 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
346                            AssumptionCache *AC, const Instruction *CxtI,
347                            const DominatorTree *DT, bool UseInstrInfo) {
348   KnownBits Known =
349       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
350   return Known.isNegative();
351 }
352 
353 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
354 
355 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
356                            const DataLayout &DL, AssumptionCache *AC,
357                            const Instruction *CxtI, const DominatorTree *DT,
358                            bool UseInstrInfo) {
359   return ::isKnownNonEqual(V1, V2,
360                            Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
361                                  UseInstrInfo, /*ORE=*/nullptr));
362 }
363 
364 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
365                               const Query &Q);
366 
367 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
368                              const DataLayout &DL, unsigned Depth,
369                              AssumptionCache *AC, const Instruction *CxtI,
370                              const DominatorTree *DT, bool UseInstrInfo) {
371   return ::MaskedValueIsZero(
372       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
373 }
374 
375 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
376                                    unsigned Depth, const Query &Q);
377 
378 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
379                                    const Query &Q) {
380   // FIXME: We currently have no way to represent the DemandedElts of a scalable
381   // vector
382   if (isa<ScalableVectorType>(V->getType()))
383     return 1;
384 
385   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
386   APInt DemandedElts =
387       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
388   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
389 }
390 
391 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
392                                   unsigned Depth, AssumptionCache *AC,
393                                   const Instruction *CxtI,
394                                   const DominatorTree *DT, bool UseInstrInfo) {
395   return ::ComputeNumSignBits(
396       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
397 }
398 
399 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
400                                    bool NSW, const APInt &DemandedElts,
401                                    KnownBits &KnownOut, KnownBits &Known2,
402                                    unsigned Depth, const Query &Q) {
403   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
404 
405   // If one operand is unknown and we have no nowrap information,
406   // the result will be unknown independently of the second operand.
407   if (KnownOut.isUnknown() && !NSW)
408     return;
409 
410   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
411   KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
412 }
413 
414 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
415                                 const APInt &DemandedElts, KnownBits &Known,
416                                 KnownBits &Known2, unsigned Depth,
417                                 const Query &Q) {
418   unsigned BitWidth = Known.getBitWidth();
419   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
420   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
421 
422   bool isKnownNegative = false;
423   bool isKnownNonNegative = false;
424   // If the multiplication is known not to overflow, compute the sign bit.
425   if (NSW) {
426     if (Op0 == Op1) {
427       // The product of a number with itself is non-negative.
428       isKnownNonNegative = true;
429     } else {
430       bool isKnownNonNegativeOp1 = Known.isNonNegative();
431       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
432       bool isKnownNegativeOp1 = Known.isNegative();
433       bool isKnownNegativeOp0 = Known2.isNegative();
434       // The product of two numbers with the same sign is non-negative.
435       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
436         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
437       // The product of a negative number and a non-negative number is either
438       // negative or zero.
439       if (!isKnownNonNegative)
440         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
441                            isKnownNonZero(Op0, Depth, Q)) ||
442                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
443                            isKnownNonZero(Op1, Depth, Q));
444     }
445   }
446 
447   assert(!Known.hasConflict() && !Known2.hasConflict());
448   // Compute a conservative estimate for high known-0 bits.
449   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
450                              Known2.countMinLeadingZeros(),
451                              BitWidth) - BitWidth;
452   LeadZ = std::min(LeadZ, BitWidth);
453 
454   // The result of the bottom bits of an integer multiply can be
455   // inferred by looking at the bottom bits of both operands and
456   // multiplying them together.
457   // We can infer at least the minimum number of known trailing bits
458   // of both operands. Depending on number of trailing zeros, we can
459   // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
460   // a and b are divisible by m and n respectively.
461   // We then calculate how many of those bits are inferrable and set
462   // the output. For example, the i8 mul:
463   //  a = XXXX1100 (12)
464   //  b = XXXX1110 (14)
465   // We know the bottom 3 bits are zero since the first can be divided by
466   // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
467   // Applying the multiplication to the trimmed arguments gets:
468   //    XX11 (3)
469   //    X111 (7)
470   // -------
471   //    XX11
472   //   XX11
473   //  XX11
474   // XX11
475   // -------
476   // XXXXX01
477   // Which allows us to infer the 2 LSBs. Since we're multiplying the result
478   // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
479   // The proof for this can be described as:
480   // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
481   //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
482   //                    umin(countTrailingZeros(C2), C6) +
483   //                    umin(C5 - umin(countTrailingZeros(C1), C5),
484   //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
485   // %aa = shl i8 %a, C5
486   // %bb = shl i8 %b, C6
487   // %aaa = or i8 %aa, C1
488   // %bbb = or i8 %bb, C2
489   // %mul = mul i8 %aaa, %bbb
490   // %mask = and i8 %mul, C7
491   //   =>
492   // %mask = i8 ((C1*C2)&C7)
493   // Where C5, C6 describe the known bits of %a, %b
494   // C1, C2 describe the known bottom bits of %a, %b.
495   // C7 describes the mask of the known bits of the result.
496   APInt Bottom0 = Known.One;
497   APInt Bottom1 = Known2.One;
498 
499   // How many times we'd be able to divide each argument by 2 (shr by 1).
500   // This gives us the number of trailing zeros on the multiplication result.
501   unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
502   unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
503   unsigned TrailZero0 = Known.countMinTrailingZeros();
504   unsigned TrailZero1 = Known2.countMinTrailingZeros();
505   unsigned TrailZ = TrailZero0 + TrailZero1;
506 
507   // Figure out the fewest known-bits operand.
508   unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
509                                       TrailBitsKnown1 - TrailZero1);
510   unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
511 
512   APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
513                       Bottom1.getLoBits(TrailBitsKnown1);
514 
515   Known.resetAll();
516   Known.Zero.setHighBits(LeadZ);
517   Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
518   Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
519 
520   // Only make use of no-wrap flags if we failed to compute the sign bit
521   // directly.  This matters if the multiplication always overflows, in
522   // which case we prefer to follow the result of the direct computation,
523   // though as the program is invoking undefined behaviour we can choose
524   // whatever we like here.
525   if (isKnownNonNegative && !Known.isNegative())
526     Known.makeNonNegative();
527   else if (isKnownNegative && !Known.isNonNegative())
528     Known.makeNegative();
529 }
530 
531 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
532                                              KnownBits &Known) {
533   unsigned BitWidth = Known.getBitWidth();
534   unsigned NumRanges = Ranges.getNumOperands() / 2;
535   assert(NumRanges >= 1);
536 
537   Known.Zero.setAllBits();
538   Known.One.setAllBits();
539 
540   for (unsigned i = 0; i < NumRanges; ++i) {
541     ConstantInt *Lower =
542         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
543     ConstantInt *Upper =
544         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
545     ConstantRange Range(Lower->getValue(), Upper->getValue());
546 
547     // The first CommonPrefixBits of all values in Range are equal.
548     unsigned CommonPrefixBits =
549         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
550     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
551     APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
552     Known.One &= UnsignedMax & Mask;
553     Known.Zero &= ~UnsignedMax & Mask;
554   }
555 }
556 
557 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
558   SmallVector<const Value *, 16> WorkSet(1, I);
559   SmallPtrSet<const Value *, 32> Visited;
560   SmallPtrSet<const Value *, 16> EphValues;
561 
562   // The instruction defining an assumption's condition itself is always
563   // considered ephemeral to that assumption (even if it has other
564   // non-ephemeral users). See r246696's test case for an example.
565   if (is_contained(I->operands(), E))
566     return true;
567 
568   while (!WorkSet.empty()) {
569     const Value *V = WorkSet.pop_back_val();
570     if (!Visited.insert(V).second)
571       continue;
572 
573     // If all uses of this value are ephemeral, then so is this value.
574     if (llvm::all_of(V->users(), [&](const User *U) {
575                                    return EphValues.count(U);
576                                  })) {
577       if (V == E)
578         return true;
579 
580       if (V == I || isSafeToSpeculativelyExecute(V)) {
581        EphValues.insert(V);
582        if (const User *U = dyn_cast<User>(V))
583          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
584               J != JE; ++J)
585            WorkSet.push_back(*J);
586       }
587     }
588   }
589 
590   return false;
591 }
592 
593 // Is this an intrinsic that cannot be speculated but also cannot trap?
594 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
595   if (const CallInst *CI = dyn_cast<CallInst>(I))
596     if (Function *F = CI->getCalledFunction())
597       switch (F->getIntrinsicID()) {
598       default: break;
599       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
600       case Intrinsic::assume:
601       case Intrinsic::sideeffect:
602       case Intrinsic::dbg_declare:
603       case Intrinsic::dbg_value:
604       case Intrinsic::dbg_label:
605       case Intrinsic::invariant_start:
606       case Intrinsic::invariant_end:
607       case Intrinsic::lifetime_start:
608       case Intrinsic::lifetime_end:
609       case Intrinsic::objectsize:
610       case Intrinsic::ptr_annotation:
611       case Intrinsic::var_annotation:
612         return true;
613       }
614 
615   return false;
616 }
617 
618 bool llvm::isValidAssumeForContext(const Instruction *Inv,
619                                    const Instruction *CxtI,
620                                    const DominatorTree *DT) {
621   // There are two restrictions on the use of an assume:
622   //  1. The assume must dominate the context (or the control flow must
623   //     reach the assume whenever it reaches the context).
624   //  2. The context must not be in the assume's set of ephemeral values
625   //     (otherwise we will use the assume to prove that the condition
626   //     feeding the assume is trivially true, thus causing the removal of
627   //     the assume).
628 
629   if (Inv->getParent() == CxtI->getParent()) {
630     // If Inv and CtxI are in the same block, check if the assume (Inv) is first
631     // in the BB.
632     if (Inv->comesBefore(CxtI))
633       return true;
634 
635     // Don't let an assume affect itself - this would cause the problems
636     // `isEphemeralValueOf` is trying to prevent, and it would also make
637     // the loop below go out of bounds.
638     if (Inv == CxtI)
639       return false;
640 
641     // The context comes first, but they're both in the same block.
642     // Make sure there is nothing in between that might interrupt
643     // the control flow, not even CxtI itself.
644     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
645       if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
646         return false;
647 
648     return !isEphemeralValueOf(Inv, CxtI);
649   }
650 
651   // Inv and CxtI are in different blocks.
652   if (DT) {
653     if (DT->dominates(Inv, CxtI))
654       return true;
655   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
656     // We don't have a DT, but this trivially dominates.
657     return true;
658   }
659 
660   return false;
661 }
662 
663 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
664   // Use of assumptions is context-sensitive. If we don't have a context, we
665   // cannot use them!
666   if (!Q.AC || !Q.CxtI)
667     return false;
668 
669   // Note that the patterns below need to be kept in sync with the code
670   // in AssumptionCache::updateAffectedValues.
671 
672   auto CmpExcludesZero = [V](ICmpInst *Cmp) {
673     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
674 
675     Value *RHS;
676     CmpInst::Predicate Pred;
677     if (!match(Cmp, m_c_ICmp(Pred, m_V, m_Value(RHS))))
678       return false;
679     // assume(v u> y) -> assume(v != 0)
680     if (Pred == ICmpInst::ICMP_UGT)
681       return true;
682 
683     // assume(v != 0)
684     // We special-case this one to ensure that we handle `assume(v != null)`.
685     if (Pred == ICmpInst::ICMP_NE)
686       return match(RHS, m_Zero());
687 
688     // All other predicates - rely on generic ConstantRange handling.
689     ConstantInt *CI;
690     if (!match(RHS, m_ConstantInt(CI)))
691       return false;
692     ConstantRange RHSRange(CI->getValue());
693     ConstantRange TrueValues =
694         ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
695     return !TrueValues.contains(APInt::getNullValue(CI->getBitWidth()));
696   };
697 
698   if (Q.CxtI && V->getType()->isPointerTy()) {
699     SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
700     if (!NullPointerIsDefined(Q.CxtI->getFunction(),
701                               V->getType()->getPointerAddressSpace()))
702       AttrKinds.push_back(Attribute::Dereferenceable);
703 
704     if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
705       return true;
706   }
707 
708   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
709     if (!AssumeVH)
710       continue;
711     CallInst *I = cast<CallInst>(AssumeVH);
712     assert(I->getFunction() == Q.CxtI->getFunction() &&
713            "Got assumption for the wrong function!");
714     if (Q.isExcluded(I))
715       continue;
716 
717     // Warning: This loop can end up being somewhat performance sensitive.
718     // We're running this loop for once for each value queried resulting in a
719     // runtime of ~O(#assumes * #values).
720 
721     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
722            "must be an assume intrinsic");
723 
724     Value *Arg = I->getArgOperand(0);
725     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
726     if (!Cmp)
727       continue;
728 
729     if (CmpExcludesZero(Cmp) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
730       return true;
731   }
732 
733   return false;
734 }
735 
736 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
737                                        unsigned Depth, const Query &Q) {
738   // Use of assumptions is context-sensitive. If we don't have a context, we
739   // cannot use them!
740   if (!Q.AC || !Q.CxtI)
741     return;
742 
743   unsigned BitWidth = Known.getBitWidth();
744 
745   // Note that the patterns below need to be kept in sync with the code
746   // in AssumptionCache::updateAffectedValues.
747 
748   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
749     if (!AssumeVH)
750       continue;
751     CallInst *I = cast<CallInst>(AssumeVH);
752     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
753            "Got assumption for the wrong function!");
754     if (Q.isExcluded(I))
755       continue;
756 
757     // Warning: This loop can end up being somewhat performance sensitive.
758     // We're running this loop for once for each value queried resulting in a
759     // runtime of ~O(#assumes * #values).
760 
761     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
762            "must be an assume intrinsic");
763 
764     Value *Arg = I->getArgOperand(0);
765 
766     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
767       assert(BitWidth == 1 && "assume operand is not i1?");
768       Known.setAllOnes();
769       return;
770     }
771     if (match(Arg, m_Not(m_Specific(V))) &&
772         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
773       assert(BitWidth == 1 && "assume operand is not i1?");
774       Known.setAllZero();
775       return;
776     }
777 
778     // The remaining tests are all recursive, so bail out if we hit the limit.
779     if (Depth == MaxAnalysisRecursionDepth)
780       continue;
781 
782     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
783     if (!Cmp)
784       continue;
785 
786     // Note that ptrtoint may change the bitwidth.
787     Value *A, *B;
788     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
789 
790     CmpInst::Predicate Pred;
791     uint64_t C;
792     switch (Cmp->getPredicate()) {
793     default:
794       break;
795     case ICmpInst::ICMP_EQ:
796       // assume(v = a)
797       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
798           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
799         KnownBits RHSKnown =
800             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
801         Known.Zero |= RHSKnown.Zero;
802         Known.One  |= RHSKnown.One;
803       // assume(v & b = a)
804       } else if (match(Cmp,
805                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
806                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
807         KnownBits RHSKnown =
808             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
809         KnownBits MaskKnown =
810             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
811 
812         // For those bits in the mask that are known to be one, we can propagate
813         // known bits from the RHS to V.
814         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
815         Known.One  |= RHSKnown.One  & MaskKnown.One;
816       // assume(~(v & b) = a)
817       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
818                                      m_Value(A))) &&
819                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
820         KnownBits RHSKnown =
821             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
822         KnownBits MaskKnown =
823             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
824 
825         // For those bits in the mask that are known to be one, we can propagate
826         // inverted known bits from the RHS to V.
827         Known.Zero |= RHSKnown.One  & MaskKnown.One;
828         Known.One  |= RHSKnown.Zero & MaskKnown.One;
829       // assume(v | b = a)
830       } else if (match(Cmp,
831                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
832                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
833         KnownBits RHSKnown =
834             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
835         KnownBits BKnown =
836             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
837 
838         // For those bits in B that are known to be zero, we can propagate known
839         // bits from the RHS to V.
840         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
841         Known.One  |= RHSKnown.One  & BKnown.Zero;
842       // assume(~(v | b) = a)
843       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
844                                      m_Value(A))) &&
845                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
846         KnownBits RHSKnown =
847             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
848         KnownBits BKnown =
849             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
850 
851         // For those bits in B that are known to be zero, we can propagate
852         // inverted known bits from the RHS to V.
853         Known.Zero |= RHSKnown.One  & BKnown.Zero;
854         Known.One  |= RHSKnown.Zero & BKnown.Zero;
855       // assume(v ^ b = a)
856       } else if (match(Cmp,
857                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
858                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
859         KnownBits RHSKnown =
860             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
861         KnownBits BKnown =
862             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
863 
864         // For those bits in B that are known to be zero, we can propagate known
865         // bits from the RHS to V. For those bits in B that are known to be one,
866         // we can propagate inverted known bits from the RHS to V.
867         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
868         Known.One  |= RHSKnown.One  & BKnown.Zero;
869         Known.Zero |= RHSKnown.One  & BKnown.One;
870         Known.One  |= RHSKnown.Zero & BKnown.One;
871       // assume(~(v ^ b) = a)
872       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
873                                      m_Value(A))) &&
874                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
875         KnownBits RHSKnown =
876             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
877         KnownBits BKnown =
878             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
879 
880         // For those bits in B that are known to be zero, we can propagate
881         // inverted known bits from the RHS to V. For those bits in B that are
882         // known to be one, we can propagate known bits from the RHS to V.
883         Known.Zero |= RHSKnown.One  & BKnown.Zero;
884         Known.One  |= RHSKnown.Zero & BKnown.Zero;
885         Known.Zero |= RHSKnown.Zero & BKnown.One;
886         Known.One  |= RHSKnown.One  & BKnown.One;
887       // assume(v << c = a)
888       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
889                                      m_Value(A))) &&
890                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
891         KnownBits RHSKnown =
892             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
893 
894         // For those bits in RHS that are known, we can propagate them to known
895         // bits in V shifted to the right by C.
896         RHSKnown.Zero.lshrInPlace(C);
897         Known.Zero |= RHSKnown.Zero;
898         RHSKnown.One.lshrInPlace(C);
899         Known.One  |= RHSKnown.One;
900       // assume(~(v << c) = a)
901       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
902                                      m_Value(A))) &&
903                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
904         KnownBits RHSKnown =
905             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
906         // For those bits in RHS that are known, we can propagate them inverted
907         // to known bits in V shifted to the right by C.
908         RHSKnown.One.lshrInPlace(C);
909         Known.Zero |= RHSKnown.One;
910         RHSKnown.Zero.lshrInPlace(C);
911         Known.One  |= RHSKnown.Zero;
912       // assume(v >> c = a)
913       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
914                                      m_Value(A))) &&
915                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
916         KnownBits RHSKnown =
917             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
918         // For those bits in RHS that are known, we can propagate them to known
919         // bits in V shifted to the right by C.
920         Known.Zero |= RHSKnown.Zero << C;
921         Known.One  |= RHSKnown.One  << C;
922       // assume(~(v >> c) = a)
923       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
924                                      m_Value(A))) &&
925                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
926         KnownBits RHSKnown =
927             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
928         // For those bits in RHS that are known, we can propagate them inverted
929         // to known bits in V shifted to the right by C.
930         Known.Zero |= RHSKnown.One  << C;
931         Known.One  |= RHSKnown.Zero << C;
932       }
933       break;
934     case ICmpInst::ICMP_SGE:
935       // assume(v >=_s c) where c is non-negative
936       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
937           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
938         KnownBits RHSKnown =
939             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
940 
941         if (RHSKnown.isNonNegative()) {
942           // We know that the sign bit is zero.
943           Known.makeNonNegative();
944         }
945       }
946       break;
947     case ICmpInst::ICMP_SGT:
948       // assume(v >_s c) where c is at least -1.
949       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
950           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
951         KnownBits RHSKnown =
952             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
953 
954         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
955           // We know that the sign bit is zero.
956           Known.makeNonNegative();
957         }
958       }
959       break;
960     case ICmpInst::ICMP_SLE:
961       // assume(v <=_s c) where c is negative
962       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
963           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
964         KnownBits RHSKnown =
965             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
966 
967         if (RHSKnown.isNegative()) {
968           // We know that the sign bit is one.
969           Known.makeNegative();
970         }
971       }
972       break;
973     case ICmpInst::ICMP_SLT:
974       // assume(v <_s c) where c is non-positive
975       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
976           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
977         KnownBits RHSKnown =
978             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
979 
980         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
981           // We know that the sign bit is one.
982           Known.makeNegative();
983         }
984       }
985       break;
986     case ICmpInst::ICMP_ULE:
987       // assume(v <=_u c)
988       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
989           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
990         KnownBits RHSKnown =
991             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
992 
993         // Whatever high bits in c are zero are known to be zero.
994         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
995       }
996       break;
997     case ICmpInst::ICMP_ULT:
998       // assume(v <_u c)
999       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
1000           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
1001         KnownBits RHSKnown =
1002             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
1003 
1004         // If the RHS is known zero, then this assumption must be wrong (nothing
1005         // is unsigned less than zero). Signal a conflict and get out of here.
1006         if (RHSKnown.isZero()) {
1007           Known.Zero.setAllBits();
1008           Known.One.setAllBits();
1009           break;
1010         }
1011 
1012         // Whatever high bits in c are zero are known to be zero (if c is a power
1013         // of 2, then one more).
1014         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
1015           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
1016         else
1017           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
1018       }
1019       break;
1020     }
1021   }
1022 
1023   // If assumptions conflict with each other or previous known bits, then we
1024   // have a logical fallacy. It's possible that the assumption is not reachable,
1025   // so this isn't a real bug. On the other hand, the program may have undefined
1026   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
1027   // clear out the known bits, try to warn the user, and hope for the best.
1028   if (Known.Zero.intersects(Known.One)) {
1029     Known.resetAll();
1030 
1031     if (Q.ORE)
1032       Q.ORE->emit([&]() {
1033         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
1034         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
1035                                           CxtI)
1036                << "Detected conflicting code assumptions. Program may "
1037                   "have undefined behavior, or compiler may have "
1038                   "internal error.";
1039       });
1040   }
1041 }
1042 
1043 /// Compute known bits from a shift operator, including those with a
1044 /// non-constant shift amount. Known is the output of this function. Known2 is a
1045 /// pre-allocated temporary with the same bit width as Known. KZF and KOF are
1046 /// operator-specific functions that, given the known-zero or known-one bits
1047 /// respectively, and a shift amount, compute the implied known-zero or
1048 /// known-one bits of the shift operator's result respectively for that shift
1049 /// amount. The results from calling KZF and KOF are conservatively combined for
1050 /// all permitted shift amounts.
1051 static void computeKnownBitsFromShiftOperator(
1052     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
1053     KnownBits &Known2, unsigned Depth, const Query &Q,
1054     function_ref<APInt(const APInt &, unsigned)> KZF,
1055     function_ref<APInt(const APInt &, unsigned)> KOF) {
1056   unsigned BitWidth = Known.getBitWidth();
1057 
1058   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1059   if (Known.isConstant()) {
1060     unsigned ShiftAmt = Known.getConstant().getLimitedValue(BitWidth - 1);
1061 
1062     computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1063     Known.Zero = KZF(Known.Zero, ShiftAmt);
1064     Known.One  = KOF(Known.One, ShiftAmt);
1065     // If the known bits conflict, this must be an overflowing left shift, so
1066     // the shift result is poison. We can return anything we want. Choose 0 for
1067     // the best folding opportunity.
1068     if (Known.hasConflict())
1069       Known.setAllZero();
1070 
1071     return;
1072   }
1073 
1074   // If the shift amount could be greater than or equal to the bit-width of the
1075   // LHS, the value could be poison, but bail out because the check below is
1076   // expensive.
1077   // TODO: Should we just carry on?
1078   if (Known.getMaxValue().uge(BitWidth)) {
1079     Known.resetAll();
1080     return;
1081   }
1082 
1083   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
1084   // BitWidth > 64 and any upper bits are known, we'll end up returning the
1085   // limit value (which implies all bits are known).
1086   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
1087   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
1088 
1089   // It would be more-clearly correct to use the two temporaries for this
1090   // calculation. Reusing the APInts here to prevent unnecessary allocations.
1091   Known.resetAll();
1092 
1093   // If we know the shifter operand is nonzero, we can sometimes infer more
1094   // known bits. However this is expensive to compute, so be lazy about it and
1095   // only compute it when absolutely necessary.
1096   Optional<bool> ShifterOperandIsNonZero;
1097 
1098   // Early exit if we can't constrain any well-defined shift amount.
1099   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1100       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1101     ShifterOperandIsNonZero =
1102         isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1103     if (!*ShifterOperandIsNonZero)
1104       return;
1105   }
1106 
1107   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1108 
1109   Known.Zero.setAllBits();
1110   Known.One.setAllBits();
1111   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1112     // Combine the shifted known input bits only for those shift amounts
1113     // compatible with its known constraints.
1114     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1115       continue;
1116     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1117       continue;
1118     // If we know the shifter is nonzero, we may be able to infer more known
1119     // bits. This check is sunk down as far as possible to avoid the expensive
1120     // call to isKnownNonZero if the cheaper checks above fail.
1121     if (ShiftAmt == 0) {
1122       if (!ShifterOperandIsNonZero.hasValue())
1123         ShifterOperandIsNonZero =
1124             isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1125       if (*ShifterOperandIsNonZero)
1126         continue;
1127     }
1128 
1129     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
1130     Known.One  &= KOF(Known2.One, ShiftAmt);
1131   }
1132 
1133   // If the known bits conflict, the result is poison. Return a 0 and hope the
1134   // caller can further optimize that.
1135   if (Known.hasConflict())
1136     Known.setAllZero();
1137 }
1138 
1139 static void computeKnownBitsFromOperator(const Operator *I,
1140                                          const APInt &DemandedElts,
1141                                          KnownBits &Known, unsigned Depth,
1142                                          const Query &Q) {
1143   unsigned BitWidth = Known.getBitWidth();
1144 
1145   KnownBits Known2(BitWidth);
1146   switch (I->getOpcode()) {
1147   default: break;
1148   case Instruction::Load:
1149     if (MDNode *MD =
1150             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1151       computeKnownBitsFromRangeMetadata(*MD, Known);
1152     break;
1153   case Instruction::And: {
1154     // If either the LHS or the RHS are Zero, the result is zero.
1155     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1156     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1157 
1158     Known &= Known2;
1159 
1160     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1161     // here we handle the more general case of adding any odd number by
1162     // matching the form add(x, add(x, y)) where y is odd.
1163     // TODO: This could be generalized to clearing any bit set in y where the
1164     // following bit is known to be unset in y.
1165     Value *X = nullptr, *Y = nullptr;
1166     if (!Known.Zero[0] && !Known.One[0] &&
1167         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1168       Known2.resetAll();
1169       computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1170       if (Known2.countMinTrailingOnes() > 0)
1171         Known.Zero.setBit(0);
1172     }
1173     break;
1174   }
1175   case Instruction::Or:
1176     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1177     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1178 
1179     Known |= Known2;
1180     break;
1181   case Instruction::Xor:
1182     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1183     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1184 
1185     Known ^= Known2;
1186     break;
1187   case Instruction::Mul: {
1188     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1189     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1190                         Known, Known2, Depth, Q);
1191     break;
1192   }
1193   case Instruction::UDiv: {
1194     // For the purposes of computing leading zeros we can conservatively
1195     // treat a udiv as a logical right shift by the power of 2 known to
1196     // be less than the denominator.
1197     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1198     unsigned LeadZ = Known2.countMinLeadingZeros();
1199 
1200     Known2.resetAll();
1201     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1202     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1203     if (RHSMaxLeadingZeros != BitWidth)
1204       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1205 
1206     Known.Zero.setHighBits(LeadZ);
1207     break;
1208   }
1209   case Instruction::Select: {
1210     const Value *LHS = nullptr, *RHS = nullptr;
1211     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1212     if (SelectPatternResult::isMinOrMax(SPF)) {
1213       computeKnownBits(RHS, Known, Depth + 1, Q);
1214       computeKnownBits(LHS, Known2, Depth + 1, Q);
1215       switch (SPF) {
1216       default:
1217         llvm_unreachable("Unhandled select pattern flavor!");
1218       case SPF_SMAX:
1219         Known = KnownBits::smax(Known, Known2);
1220         break;
1221       case SPF_SMIN:
1222         Known = KnownBits::smin(Known, Known2);
1223         break;
1224       case SPF_UMAX:
1225         Known = KnownBits::umax(Known, Known2);
1226         break;
1227       case SPF_UMIN:
1228         Known = KnownBits::umin(Known, Known2);
1229         break;
1230       }
1231       break;
1232     }
1233 
1234     computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1235     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1236 
1237     // Only known if known in both the LHS and RHS.
1238     Known.One &= Known2.One;
1239     Known.Zero &= Known2.Zero;
1240 
1241     if (SPF == SPF_ABS) {
1242       // RHS from matchSelectPattern returns the negation part of abs pattern.
1243       // If the negate has an NSW flag we can assume the sign bit of the result
1244       // will be 0 because that makes abs(INT_MIN) undefined.
1245       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1246           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1247         Known.Zero.setSignBit();
1248     }
1249 
1250     break;
1251   }
1252   case Instruction::FPTrunc:
1253   case Instruction::FPExt:
1254   case Instruction::FPToUI:
1255   case Instruction::FPToSI:
1256   case Instruction::SIToFP:
1257   case Instruction::UIToFP:
1258     break; // Can't work with floating point.
1259   case Instruction::PtrToInt:
1260   case Instruction::IntToPtr:
1261     // Fall through and handle them the same as zext/trunc.
1262     LLVM_FALLTHROUGH;
1263   case Instruction::ZExt:
1264   case Instruction::Trunc: {
1265     Type *SrcTy = I->getOperand(0)->getType();
1266 
1267     unsigned SrcBitWidth;
1268     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1269     // which fall through here.
1270     Type *ScalarTy = SrcTy->getScalarType();
1271     SrcBitWidth = ScalarTy->isPointerTy() ?
1272       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1273       Q.DL.getTypeSizeInBits(ScalarTy);
1274 
1275     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1276     Known = Known.anyextOrTrunc(SrcBitWidth);
1277     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1278     Known = Known.zextOrTrunc(BitWidth);
1279     break;
1280   }
1281   case Instruction::BitCast: {
1282     Type *SrcTy = I->getOperand(0)->getType();
1283     if (SrcTy->isIntOrPtrTy() &&
1284         // TODO: For now, not handling conversions like:
1285         // (bitcast i64 %x to <2 x i32>)
1286         !I->getType()->isVectorTy()) {
1287       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1288       break;
1289     }
1290     break;
1291   }
1292   case Instruction::SExt: {
1293     // Compute the bits in the result that are not present in the input.
1294     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1295 
1296     Known = Known.trunc(SrcBitWidth);
1297     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1298     // If the sign bit of the input is known set or clear, then we know the
1299     // top bits of the result.
1300     Known = Known.sext(BitWidth);
1301     break;
1302   }
1303   case Instruction::Shl: {
1304     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1305     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1306     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1307       APInt KZResult = KnownZero << ShiftAmt;
1308       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1309       // If this shift has "nsw" keyword, then the result is either a poison
1310       // value or has the same sign bit as the first operand.
1311       if (NSW && KnownZero.isSignBitSet())
1312         KZResult.setSignBit();
1313       return KZResult;
1314     };
1315 
1316     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1317       APInt KOResult = KnownOne << ShiftAmt;
1318       if (NSW && KnownOne.isSignBitSet())
1319         KOResult.setSignBit();
1320       return KOResult;
1321     };
1322 
1323     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1324                                       KZF, KOF);
1325     break;
1326   }
1327   case Instruction::LShr: {
1328     // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1329     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1330       APInt KZResult = KnownZero.lshr(ShiftAmt);
1331       // High bits known zero.
1332       KZResult.setHighBits(ShiftAmt);
1333       return KZResult;
1334     };
1335 
1336     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1337       return KnownOne.lshr(ShiftAmt);
1338     };
1339 
1340     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1341                                       KZF, KOF);
1342     break;
1343   }
1344   case Instruction::AShr: {
1345     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1346     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1347       return KnownZero.ashr(ShiftAmt);
1348     };
1349 
1350     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1351       return KnownOne.ashr(ShiftAmt);
1352     };
1353 
1354     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1355                                       KZF, KOF);
1356     break;
1357   }
1358   case Instruction::Sub: {
1359     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1360     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1361                            DemandedElts, Known, Known2, Depth, Q);
1362     break;
1363   }
1364   case Instruction::Add: {
1365     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1366     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1367                            DemandedElts, Known, Known2, Depth, Q);
1368     break;
1369   }
1370   case Instruction::SRem:
1371     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1372       APInt RA = Rem->getValue().abs();
1373       if (RA.isPowerOf2()) {
1374         APInt LowBits = RA - 1;
1375         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1376 
1377         // The low bits of the first operand are unchanged by the srem.
1378         Known.Zero = Known2.Zero & LowBits;
1379         Known.One = Known2.One & LowBits;
1380 
1381         // If the first operand is non-negative or has all low bits zero, then
1382         // the upper bits are all zero.
1383         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1384           Known.Zero |= ~LowBits;
1385 
1386         // If the first operand is negative and not all low bits are zero, then
1387         // the upper bits are all one.
1388         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1389           Known.One |= ~LowBits;
1390 
1391         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1392         break;
1393       }
1394     }
1395 
1396     // The sign bit is the LHS's sign bit, except when the result of the
1397     // remainder is zero.
1398     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1399     // If it's known zero, our sign bit is also zero.
1400     if (Known2.isNonNegative())
1401       Known.makeNonNegative();
1402 
1403     break;
1404   case Instruction::URem: {
1405     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1406       const APInt &RA = Rem->getValue();
1407       if (RA.isPowerOf2()) {
1408         APInt LowBits = (RA - 1);
1409         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1410         Known.Zero |= ~LowBits;
1411         Known.One &= LowBits;
1412         break;
1413       }
1414     }
1415 
1416     // Since the result is less than or equal to either operand, any leading
1417     // zero bits in either operand must also exist in the result.
1418     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1419     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1420 
1421     unsigned Leaders =
1422         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1423     Known.resetAll();
1424     Known.Zero.setHighBits(Leaders);
1425     break;
1426   }
1427   case Instruction::Alloca:
1428     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1429     break;
1430   case Instruction::GetElementPtr: {
1431     // Analyze all of the subscripts of this getelementptr instruction
1432     // to determine if we can prove known low zero bits.
1433     KnownBits LocalKnown(BitWidth);
1434     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1435     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1436 
1437     gep_type_iterator GTI = gep_type_begin(I);
1438     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1439       // TrailZ can only become smaller, short-circuit if we hit zero.
1440       if (TrailZ == 0)
1441         break;
1442 
1443       Value *Index = I->getOperand(i);
1444       if (StructType *STy = GTI.getStructTypeOrNull()) {
1445         // Handle struct member offset arithmetic.
1446 
1447         // Handle case when index is vector zeroinitializer
1448         Constant *CIndex = cast<Constant>(Index);
1449         if (CIndex->isZeroValue())
1450           continue;
1451 
1452         if (CIndex->getType()->isVectorTy())
1453           Index = CIndex->getSplatValue();
1454 
1455         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1456         const StructLayout *SL = Q.DL.getStructLayout(STy);
1457         uint64_t Offset = SL->getElementOffset(Idx);
1458         TrailZ = std::min<unsigned>(TrailZ,
1459                                     countTrailingZeros(Offset));
1460       } else {
1461         // Handle array index arithmetic.
1462         Type *IndexedTy = GTI.getIndexedType();
1463         if (!IndexedTy->isSized()) {
1464           TrailZ = 0;
1465           break;
1466         }
1467         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1468         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy).getKnownMinSize();
1469         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1470         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1471         TrailZ = std::min(TrailZ,
1472                           unsigned(countTrailingZeros(TypeSize) +
1473                                    LocalKnown.countMinTrailingZeros()));
1474       }
1475     }
1476 
1477     Known.Zero.setLowBits(TrailZ);
1478     break;
1479   }
1480   case Instruction::PHI: {
1481     const PHINode *P = cast<PHINode>(I);
1482     // Handle the case of a simple two-predecessor recurrence PHI.
1483     // There's a lot more that could theoretically be done here, but
1484     // this is sufficient to catch some interesting cases.
1485     if (P->getNumIncomingValues() == 2) {
1486       for (unsigned i = 0; i != 2; ++i) {
1487         Value *L = P->getIncomingValue(i);
1488         Value *R = P->getIncomingValue(!i);
1489         Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
1490         Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
1491         Operator *LU = dyn_cast<Operator>(L);
1492         if (!LU)
1493           continue;
1494         unsigned Opcode = LU->getOpcode();
1495         // Check for operations that have the property that if
1496         // both their operands have low zero bits, the result
1497         // will have low zero bits.
1498         if (Opcode == Instruction::Add ||
1499             Opcode == Instruction::Sub ||
1500             Opcode == Instruction::And ||
1501             Opcode == Instruction::Or ||
1502             Opcode == Instruction::Mul) {
1503           Value *LL = LU->getOperand(0);
1504           Value *LR = LU->getOperand(1);
1505           // Find a recurrence.
1506           if (LL == I)
1507             L = LR;
1508           else if (LR == I)
1509             L = LL;
1510           else
1511             continue; // Check for recurrence with L and R flipped.
1512 
1513           // Change the context instruction to the "edge" that flows into the
1514           // phi. This is important because that is where the value is actually
1515           // "evaluated" even though it is used later somewhere else. (see also
1516           // D69571).
1517           Query RecQ = Q;
1518 
1519           // Ok, we have a PHI of the form L op= R. Check for low
1520           // zero bits.
1521           RecQ.CxtI = RInst;
1522           computeKnownBits(R, Known2, Depth + 1, RecQ);
1523 
1524           // We need to take the minimum number of known bits
1525           KnownBits Known3(BitWidth);
1526           RecQ.CxtI = LInst;
1527           computeKnownBits(L, Known3, Depth + 1, RecQ);
1528 
1529           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1530                                          Known3.countMinTrailingZeros()));
1531 
1532           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1533           if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1534             // If initial value of recurrence is nonnegative, and we are adding
1535             // a nonnegative number with nsw, the result can only be nonnegative
1536             // or poison value regardless of the number of times we execute the
1537             // add in phi recurrence. If initial value is negative and we are
1538             // adding a negative number with nsw, the result can only be
1539             // negative or poison value. Similar arguments apply to sub and mul.
1540             //
1541             // (add non-negative, non-negative) --> non-negative
1542             // (add negative, negative) --> negative
1543             if (Opcode == Instruction::Add) {
1544               if (Known2.isNonNegative() && Known3.isNonNegative())
1545                 Known.makeNonNegative();
1546               else if (Known2.isNegative() && Known3.isNegative())
1547                 Known.makeNegative();
1548             }
1549 
1550             // (sub nsw non-negative, negative) --> non-negative
1551             // (sub nsw negative, non-negative) --> negative
1552             else if (Opcode == Instruction::Sub && LL == I) {
1553               if (Known2.isNonNegative() && Known3.isNegative())
1554                 Known.makeNonNegative();
1555               else if (Known2.isNegative() && Known3.isNonNegative())
1556                 Known.makeNegative();
1557             }
1558 
1559             // (mul nsw non-negative, non-negative) --> non-negative
1560             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1561                      Known3.isNonNegative())
1562               Known.makeNonNegative();
1563           }
1564 
1565           break;
1566         }
1567       }
1568     }
1569 
1570     // Unreachable blocks may have zero-operand PHI nodes.
1571     if (P->getNumIncomingValues() == 0)
1572       break;
1573 
1574     // Otherwise take the unions of the known bit sets of the operands,
1575     // taking conservative care to avoid excessive recursion.
1576     if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1577       // Skip if every incoming value references to ourself.
1578       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1579         break;
1580 
1581       Known.Zero.setAllBits();
1582       Known.One.setAllBits();
1583       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1584         Value *IncValue = P->getIncomingValue(u);
1585         // Skip direct self references.
1586         if (IncValue == P) continue;
1587 
1588         // Change the context instruction to the "edge" that flows into the
1589         // phi. This is important because that is where the value is actually
1590         // "evaluated" even though it is used later somewhere else. (see also
1591         // D69571).
1592         Query RecQ = Q;
1593         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1594 
1595         Known2 = KnownBits(BitWidth);
1596         // Recurse, but cap the recursion to one level, because we don't
1597         // want to waste time spinning around in loops.
1598         computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1599         Known.Zero &= Known2.Zero;
1600         Known.One &= Known2.One;
1601         // If all bits have been ruled out, there's no need to check
1602         // more operands.
1603         if (!Known.Zero && !Known.One)
1604           break;
1605       }
1606     }
1607     break;
1608   }
1609   case Instruction::Call:
1610   case Instruction::Invoke:
1611     // If range metadata is attached to this call, set known bits from that,
1612     // and then intersect with known bits based on other properties of the
1613     // function.
1614     if (MDNode *MD =
1615             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1616       computeKnownBitsFromRangeMetadata(*MD, Known);
1617     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1618       computeKnownBits(RV, Known2, Depth + 1, Q);
1619       Known.Zero |= Known2.Zero;
1620       Known.One |= Known2.One;
1621     }
1622     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1623       switch (II->getIntrinsicID()) {
1624       default: break;
1625       case Intrinsic::abs:
1626         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1627 
1628         // If the source's MSB is zero then we know the rest of the bits.
1629         if (Known2.isNonNegative()) {
1630           Known.Zero |= Known2.Zero;
1631           Known.One |= Known2.One;
1632           break;
1633         }
1634 
1635         // Absolute value preserves trailing zero count.
1636         Known.Zero.setLowBits(Known2.Zero.countTrailingOnes());
1637 
1638         // If this call is undefined for INT_MIN, the result is positive. We
1639         // also know it can't be INT_MIN if there is a set bit that isn't the
1640         // sign bit.
1641         Known2.One.clearSignBit();
1642         if (match(II->getArgOperand(1), m_One()) || Known2.One.getBoolValue())
1643           Known.Zero.setSignBit();
1644         // FIXME: Handle known negative input?
1645         // FIXME: Calculate the negated Known bits and combine them?
1646         break;
1647       case Intrinsic::bitreverse:
1648         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1649         Known.Zero |= Known2.Zero.reverseBits();
1650         Known.One |= Known2.One.reverseBits();
1651         break;
1652       case Intrinsic::bswap:
1653         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1654         Known.Zero |= Known2.Zero.byteSwap();
1655         Known.One |= Known2.One.byteSwap();
1656         break;
1657       case Intrinsic::ctlz: {
1658         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1659         // If we have a known 1, its position is our upper bound.
1660         unsigned PossibleLZ = Known2.One.countLeadingZeros();
1661         // If this call is undefined for 0, the result will be less than 2^n.
1662         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1663           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1664         unsigned LowBits = Log2_32(PossibleLZ)+1;
1665         Known.Zero.setBitsFrom(LowBits);
1666         break;
1667       }
1668       case Intrinsic::cttz: {
1669         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1670         // If we have a known 1, its position is our upper bound.
1671         unsigned PossibleTZ = Known2.One.countTrailingZeros();
1672         // If this call is undefined for 0, the result will be less than 2^n.
1673         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1674           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1675         unsigned LowBits = Log2_32(PossibleTZ)+1;
1676         Known.Zero.setBitsFrom(LowBits);
1677         break;
1678       }
1679       case Intrinsic::ctpop: {
1680         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1681         // We can bound the space the count needs.  Also, bits known to be zero
1682         // can't contribute to the population.
1683         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1684         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1685         Known.Zero.setBitsFrom(LowBits);
1686         // TODO: we could bound KnownOne using the lower bound on the number
1687         // of bits which might be set provided by popcnt KnownOne2.
1688         break;
1689       }
1690       case Intrinsic::fshr:
1691       case Intrinsic::fshl: {
1692         const APInt *SA;
1693         if (!match(I->getOperand(2), m_APInt(SA)))
1694           break;
1695 
1696         // Normalize to funnel shift left.
1697         uint64_t ShiftAmt = SA->urem(BitWidth);
1698         if (II->getIntrinsicID() == Intrinsic::fshr)
1699           ShiftAmt = BitWidth - ShiftAmt;
1700 
1701         KnownBits Known3(BitWidth);
1702         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1703         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1704 
1705         Known.Zero =
1706             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1707         Known.One =
1708             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1709         break;
1710       }
1711       case Intrinsic::uadd_sat:
1712       case Intrinsic::usub_sat: {
1713         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1714         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1715         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1716 
1717         // Add: Leading ones of either operand are preserved.
1718         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1719         // as leading zeros in the result.
1720         unsigned LeadingKnown;
1721         if (IsAdd)
1722           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1723                                   Known2.countMinLeadingOnes());
1724         else
1725           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1726                                   Known2.countMinLeadingOnes());
1727 
1728         Known = KnownBits::computeForAddSub(
1729             IsAdd, /* NSW */ false, Known, Known2);
1730 
1731         // We select between the operation result and all-ones/zero
1732         // respectively, so we can preserve known ones/zeros.
1733         if (IsAdd) {
1734           Known.One.setHighBits(LeadingKnown);
1735           Known.Zero.clearAllBits();
1736         } else {
1737           Known.Zero.setHighBits(LeadingKnown);
1738           Known.One.clearAllBits();
1739         }
1740         break;
1741       }
1742       case Intrinsic::umin:
1743         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1744         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1745         Known = KnownBits::umin(Known, Known2);
1746         break;
1747       case Intrinsic::umax:
1748         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1749         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1750         Known = KnownBits::umax(Known, Known2);
1751         break;
1752       case Intrinsic::smin:
1753         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1754         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1755         Known = KnownBits::smin(Known, Known2);
1756         break;
1757       case Intrinsic::smax:
1758         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1759         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1760         Known = KnownBits::smax(Known, Known2);
1761         break;
1762       case Intrinsic::x86_sse42_crc32_64_64:
1763         Known.Zero.setBitsFrom(32);
1764         break;
1765       }
1766     }
1767     break;
1768   case Instruction::ShuffleVector: {
1769     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1770     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1771     if (!Shuf) {
1772       Known.resetAll();
1773       return;
1774     }
1775     // For undef elements, we don't know anything about the common state of
1776     // the shuffle result.
1777     APInt DemandedLHS, DemandedRHS;
1778     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1779       Known.resetAll();
1780       return;
1781     }
1782     Known.One.setAllBits();
1783     Known.Zero.setAllBits();
1784     if (!!DemandedLHS) {
1785       const Value *LHS = Shuf->getOperand(0);
1786       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1787       // If we don't know any bits, early out.
1788       if (Known.isUnknown())
1789         break;
1790     }
1791     if (!!DemandedRHS) {
1792       const Value *RHS = Shuf->getOperand(1);
1793       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1794       Known.One &= Known2.One;
1795       Known.Zero &= Known2.Zero;
1796     }
1797     break;
1798   }
1799   case Instruction::InsertElement: {
1800     const Value *Vec = I->getOperand(0);
1801     const Value *Elt = I->getOperand(1);
1802     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1803     // Early out if the index is non-constant or out-of-range.
1804     unsigned NumElts = DemandedElts.getBitWidth();
1805     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1806       Known.resetAll();
1807       return;
1808     }
1809     Known.One.setAllBits();
1810     Known.Zero.setAllBits();
1811     unsigned EltIdx = CIdx->getZExtValue();
1812     // Do we demand the inserted element?
1813     if (DemandedElts[EltIdx]) {
1814       computeKnownBits(Elt, Known, Depth + 1, Q);
1815       // If we don't know any bits, early out.
1816       if (Known.isUnknown())
1817         break;
1818     }
1819     // We don't need the base vector element that has been inserted.
1820     APInt DemandedVecElts = DemandedElts;
1821     DemandedVecElts.clearBit(EltIdx);
1822     if (!!DemandedVecElts) {
1823       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1824       Known.One &= Known2.One;
1825       Known.Zero &= Known2.Zero;
1826     }
1827     break;
1828   }
1829   case Instruction::ExtractElement: {
1830     // Look through extract element. If the index is non-constant or
1831     // out-of-range demand all elements, otherwise just the extracted element.
1832     const Value *Vec = I->getOperand(0);
1833     const Value *Idx = I->getOperand(1);
1834     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1835     if (isa<ScalableVectorType>(Vec->getType())) {
1836       // FIXME: there's probably *something* we can do with scalable vectors
1837       Known.resetAll();
1838       break;
1839     }
1840     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1841     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1842     if (CIdx && CIdx->getValue().ult(NumElts))
1843       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1844     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1845     break;
1846   }
1847   case Instruction::ExtractValue:
1848     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1849       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1850       if (EVI->getNumIndices() != 1) break;
1851       if (EVI->getIndices()[0] == 0) {
1852         switch (II->getIntrinsicID()) {
1853         default: break;
1854         case Intrinsic::uadd_with_overflow:
1855         case Intrinsic::sadd_with_overflow:
1856           computeKnownBitsAddSub(true, II->getArgOperand(0),
1857                                  II->getArgOperand(1), false, DemandedElts,
1858                                  Known, Known2, Depth, Q);
1859           break;
1860         case Intrinsic::usub_with_overflow:
1861         case Intrinsic::ssub_with_overflow:
1862           computeKnownBitsAddSub(false, II->getArgOperand(0),
1863                                  II->getArgOperand(1), false, DemandedElts,
1864                                  Known, Known2, Depth, Q);
1865           break;
1866         case Intrinsic::umul_with_overflow:
1867         case Intrinsic::smul_with_overflow:
1868           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1869                               DemandedElts, Known, Known2, Depth, Q);
1870           break;
1871         }
1872       }
1873     }
1874     break;
1875   case Instruction::Freeze:
1876     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.CxtI, Q.DT, Depth + 1))
1877       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1878     break;
1879   }
1880 }
1881 
1882 /// Determine which bits of V are known to be either zero or one and return
1883 /// them.
1884 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1885                            unsigned Depth, const Query &Q) {
1886   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1887   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1888   return Known;
1889 }
1890 
1891 /// Determine which bits of V are known to be either zero or one and return
1892 /// them.
1893 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1894   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1895   computeKnownBits(V, Known, Depth, Q);
1896   return Known;
1897 }
1898 
1899 /// Determine which bits of V are known to be either zero or one and return
1900 /// them in the Known bit set.
1901 ///
1902 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1903 /// we cannot optimize based on the assumption that it is zero without changing
1904 /// it to be an explicit zero.  If we don't change it to zero, other code could
1905 /// optimized based on the contradictory assumption that it is non-zero.
1906 /// Because instcombine aggressively folds operations with undef args anyway,
1907 /// this won't lose us code quality.
1908 ///
1909 /// This function is defined on values with integer type, values with pointer
1910 /// type, and vectors of integers.  In the case
1911 /// where V is a vector, known zero, and known one values are the
1912 /// same width as the vector element, and the bit is set only if it is true
1913 /// for all of the demanded elements in the vector specified by DemandedElts.
1914 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1915                       KnownBits &Known, unsigned Depth, const Query &Q) {
1916   if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1917     // No demanded elts or V is a scalable vector, better to assume we don't
1918     // know anything.
1919     Known.resetAll();
1920     return;
1921   }
1922 
1923   assert(V && "No Value?");
1924   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1925 
1926 #ifndef NDEBUG
1927   Type *Ty = V->getType();
1928   unsigned BitWidth = Known.getBitWidth();
1929 
1930   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1931          "Not integer or pointer type!");
1932 
1933   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1934     assert(
1935         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1936         "DemandedElt width should equal the fixed vector number of elements");
1937   } else {
1938     assert(DemandedElts == APInt(1, 1) &&
1939            "DemandedElt width should be 1 for scalars");
1940   }
1941 
1942   Type *ScalarTy = Ty->getScalarType();
1943   if (ScalarTy->isPointerTy()) {
1944     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1945            "V and Known should have same BitWidth");
1946   } else {
1947     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1948            "V and Known should have same BitWidth");
1949   }
1950 #endif
1951 
1952   const APInt *C;
1953   if (match(V, m_APInt(C))) {
1954     // We know all of the bits for a scalar constant or a splat vector constant!
1955     Known.One = *C;
1956     Known.Zero = ~Known.One;
1957     return;
1958   }
1959   // Null and aggregate-zero are all-zeros.
1960   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1961     Known.setAllZero();
1962     return;
1963   }
1964   // Handle a constant vector by taking the intersection of the known bits of
1965   // each element.
1966   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1967     // We know that CDV must be a vector of integers. Take the intersection of
1968     // each element.
1969     Known.Zero.setAllBits(); Known.One.setAllBits();
1970     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1971       if (!DemandedElts[i])
1972         continue;
1973       APInt Elt = CDV->getElementAsAPInt(i);
1974       Known.Zero &= ~Elt;
1975       Known.One &= Elt;
1976     }
1977     return;
1978   }
1979 
1980   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1981     // We know that CV must be a vector of integers. Take the intersection of
1982     // each element.
1983     Known.Zero.setAllBits(); Known.One.setAllBits();
1984     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1985       if (!DemandedElts[i])
1986         continue;
1987       Constant *Element = CV->getAggregateElement(i);
1988       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1989       if (!ElementCI) {
1990         Known.resetAll();
1991         return;
1992       }
1993       const APInt &Elt = ElementCI->getValue();
1994       Known.Zero &= ~Elt;
1995       Known.One &= Elt;
1996     }
1997     return;
1998   }
1999 
2000   // Start out not knowing anything.
2001   Known.resetAll();
2002 
2003   // We can't imply anything about undefs.
2004   if (isa<UndefValue>(V))
2005     return;
2006 
2007   // There's no point in looking through other users of ConstantData for
2008   // assumptions.  Confirm that we've handled them all.
2009   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
2010 
2011   // All recursive calls that increase depth must come after this.
2012   if (Depth == MaxAnalysisRecursionDepth)
2013     return;
2014 
2015   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2016   // the bits of its aliasee.
2017   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2018     if (!GA->isInterposable())
2019       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2020     return;
2021   }
2022 
2023   if (const Operator *I = dyn_cast<Operator>(V))
2024     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2025 
2026   // Aligned pointers have trailing zeros - refine Known.Zero set
2027   if (isa<PointerType>(V->getType())) {
2028     Align Alignment = V->getPointerAlignment(Q.DL);
2029     Known.Zero.setLowBits(countTrailingZeros(Alignment.value()));
2030   }
2031 
2032   // computeKnownBitsFromAssume strictly refines Known.
2033   // Therefore, we run them after computeKnownBitsFromOperator.
2034 
2035   // Check whether a nearby assume intrinsic can determine some known bits.
2036   computeKnownBitsFromAssume(V, Known, Depth, Q);
2037 
2038   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2039 }
2040 
2041 /// Return true if the given value is known to have exactly one
2042 /// bit set when defined. For vectors return true if every element is known to
2043 /// be a power of two when defined. Supports values with integer or pointer
2044 /// types and vectors of integers.
2045 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2046                             const Query &Q) {
2047   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2048 
2049   // Attempt to match against constants.
2050   if (OrZero && match(V, m_Power2OrZero()))
2051       return true;
2052   if (match(V, m_Power2()))
2053       return true;
2054 
2055   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
2056   // it is shifted off the end then the result is undefined.
2057   if (match(V, m_Shl(m_One(), m_Value())))
2058     return true;
2059 
2060   // (signmask) >>l X is clearly a power of two if the one is not shifted off
2061   // the bottom.  If it is shifted off the bottom then the result is undefined.
2062   if (match(V, m_LShr(m_SignMask(), m_Value())))
2063     return true;
2064 
2065   // The remaining tests are all recursive, so bail out if we hit the limit.
2066   if (Depth++ == MaxAnalysisRecursionDepth)
2067     return false;
2068 
2069   Value *X = nullptr, *Y = nullptr;
2070   // A shift left or a logical shift right of a power of two is a power of two
2071   // or zero.
2072   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2073                  match(V, m_LShr(m_Value(X), m_Value()))))
2074     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2075 
2076   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2077     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2078 
2079   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2080     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2081            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2082 
2083   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2084     // A power of two and'd with anything is a power of two or zero.
2085     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2086         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2087       return true;
2088     // X & (-X) is always a power of two or zero.
2089     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2090       return true;
2091     return false;
2092   }
2093 
2094   // Adding a power-of-two or zero to the same power-of-two or zero yields
2095   // either the original power-of-two, a larger power-of-two or zero.
2096   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2097     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2098     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2099         Q.IIQ.hasNoSignedWrap(VOBO)) {
2100       if (match(X, m_And(m_Specific(Y), m_Value())) ||
2101           match(X, m_And(m_Value(), m_Specific(Y))))
2102         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2103           return true;
2104       if (match(Y, m_And(m_Specific(X), m_Value())) ||
2105           match(Y, m_And(m_Value(), m_Specific(X))))
2106         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2107           return true;
2108 
2109       unsigned BitWidth = V->getType()->getScalarSizeInBits();
2110       KnownBits LHSBits(BitWidth);
2111       computeKnownBits(X, LHSBits, Depth, Q);
2112 
2113       KnownBits RHSBits(BitWidth);
2114       computeKnownBits(Y, RHSBits, Depth, Q);
2115       // If i8 V is a power of two or zero:
2116       //  ZeroBits: 1 1 1 0 1 1 1 1
2117       // ~ZeroBits: 0 0 0 1 0 0 0 0
2118       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2119         // If OrZero isn't set, we cannot give back a zero result.
2120         // Make sure either the LHS or RHS has a bit set.
2121         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2122           return true;
2123     }
2124   }
2125 
2126   // An exact divide or right shift can only shift off zero bits, so the result
2127   // is a power of two only if the first operand is a power of two and not
2128   // copying a sign bit (sdiv int_min, 2).
2129   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2130       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2131     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2132                                   Depth, Q);
2133   }
2134 
2135   return false;
2136 }
2137 
2138 /// Test whether a GEP's result is known to be non-null.
2139 ///
2140 /// Uses properties inherent in a GEP to try to determine whether it is known
2141 /// to be non-null.
2142 ///
2143 /// Currently this routine does not support vector GEPs.
2144 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2145                               const Query &Q) {
2146   const Function *F = nullptr;
2147   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2148     F = I->getFunction();
2149 
2150   if (!GEP->isInBounds() ||
2151       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2152     return false;
2153 
2154   // FIXME: Support vector-GEPs.
2155   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2156 
2157   // If the base pointer is non-null, we cannot walk to a null address with an
2158   // inbounds GEP in address space zero.
2159   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2160     return true;
2161 
2162   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2163   // If so, then the GEP cannot produce a null pointer, as doing so would
2164   // inherently violate the inbounds contract within address space zero.
2165   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2166        GTI != GTE; ++GTI) {
2167     // Struct types are easy -- they must always be indexed by a constant.
2168     if (StructType *STy = GTI.getStructTypeOrNull()) {
2169       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2170       unsigned ElementIdx = OpC->getZExtValue();
2171       const StructLayout *SL = Q.DL.getStructLayout(STy);
2172       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2173       if (ElementOffset > 0)
2174         return true;
2175       continue;
2176     }
2177 
2178     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2179     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2180       continue;
2181 
2182     // Fast path the constant operand case both for efficiency and so we don't
2183     // increment Depth when just zipping down an all-constant GEP.
2184     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2185       if (!OpC->isZero())
2186         return true;
2187       continue;
2188     }
2189 
2190     // We post-increment Depth here because while isKnownNonZero increments it
2191     // as well, when we pop back up that increment won't persist. We don't want
2192     // to recurse 10k times just because we have 10k GEP operands. We don't
2193     // bail completely out because we want to handle constant GEPs regardless
2194     // of depth.
2195     if (Depth++ >= MaxAnalysisRecursionDepth)
2196       continue;
2197 
2198     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2199       return true;
2200   }
2201 
2202   return false;
2203 }
2204 
2205 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2206                                                   const Instruction *CtxI,
2207                                                   const DominatorTree *DT) {
2208   if (isa<Constant>(V))
2209     return false;
2210 
2211   if (!CtxI || !DT)
2212     return false;
2213 
2214   unsigned NumUsesExplored = 0;
2215   for (auto *U : V->users()) {
2216     // Avoid massive lists
2217     if (NumUsesExplored >= DomConditionsMaxUses)
2218       break;
2219     NumUsesExplored++;
2220 
2221     // If the value is used as an argument to a call or invoke, then argument
2222     // attributes may provide an answer about null-ness.
2223     if (const auto *CB = dyn_cast<CallBase>(U))
2224       if (auto *CalledFunc = CB->getCalledFunction())
2225         for (const Argument &Arg : CalledFunc->args())
2226           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2227               Arg.hasNonNullAttr() && DT->dominates(CB, CtxI))
2228             return true;
2229 
2230     // If the value is used as a load/store, then the pointer must be non null.
2231     if (V == getLoadStorePointerOperand(U)) {
2232       const Instruction *I = cast<Instruction>(U);
2233       if (!NullPointerIsDefined(I->getFunction(),
2234                                 V->getType()->getPointerAddressSpace()) &&
2235           DT->dominates(I, CtxI))
2236         return true;
2237     }
2238 
2239     // Consider only compare instructions uniquely controlling a branch
2240     CmpInst::Predicate Pred;
2241     if (!match(const_cast<User *>(U),
2242                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
2243         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
2244       continue;
2245 
2246     SmallVector<const User *, 4> WorkList;
2247     SmallPtrSet<const User *, 4> Visited;
2248     for (auto *CmpU : U->users()) {
2249       assert(WorkList.empty() && "Should be!");
2250       if (Visited.insert(CmpU).second)
2251         WorkList.push_back(CmpU);
2252 
2253       while (!WorkList.empty()) {
2254         auto *Curr = WorkList.pop_back_val();
2255 
2256         // If a user is an AND, add all its users to the work list. We only
2257         // propagate "pred != null" condition through AND because it is only
2258         // correct to assume that all conditions of AND are met in true branch.
2259         // TODO: Support similar logic of OR and EQ predicate?
2260         if (Pred == ICmpInst::ICMP_NE)
2261           if (auto *BO = dyn_cast<BinaryOperator>(Curr))
2262             if (BO->getOpcode() == Instruction::And) {
2263               for (auto *BOU : BO->users())
2264                 if (Visited.insert(BOU).second)
2265                   WorkList.push_back(BOU);
2266               continue;
2267             }
2268 
2269         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2270           assert(BI->isConditional() && "uses a comparison!");
2271 
2272           BasicBlock *NonNullSuccessor =
2273               BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
2274           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2275           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2276             return true;
2277         } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
2278                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2279           return true;
2280         }
2281       }
2282     }
2283   }
2284 
2285   return false;
2286 }
2287 
2288 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2289 /// ensure that the value it's attached to is never Value?  'RangeType' is
2290 /// is the type of the value described by the range.
2291 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2292   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2293   assert(NumRanges >= 1);
2294   for (unsigned i = 0; i < NumRanges; ++i) {
2295     ConstantInt *Lower =
2296         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2297     ConstantInt *Upper =
2298         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2299     ConstantRange Range(Lower->getValue(), Upper->getValue());
2300     if (Range.contains(Value))
2301       return false;
2302   }
2303   return true;
2304 }
2305 
2306 /// Return true if the given value is known to be non-zero when defined. For
2307 /// vectors, return true if every demanded element is known to be non-zero when
2308 /// defined. For pointers, if the context instruction and dominator tree are
2309 /// specified, perform context-sensitive analysis and return true if the
2310 /// pointer couldn't possibly be null at the specified instruction.
2311 /// Supports values with integer or pointer type and vectors of integers.
2312 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2313                     const Query &Q) {
2314   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2315   // vector
2316   if (isa<ScalableVectorType>(V->getType()))
2317     return false;
2318 
2319   if (auto *C = dyn_cast<Constant>(V)) {
2320     if (C->isNullValue())
2321       return false;
2322     if (isa<ConstantInt>(C))
2323       // Must be non-zero due to null test above.
2324       return true;
2325 
2326     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2327       // See the comment for IntToPtr/PtrToInt instructions below.
2328       if (CE->getOpcode() == Instruction::IntToPtr ||
2329           CE->getOpcode() == Instruction::PtrToInt)
2330         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2331             Q.DL.getTypeSizeInBits(CE->getType()))
2332           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2333     }
2334 
2335     // For constant vectors, check that all elements are undefined or known
2336     // non-zero to determine that the whole vector is known non-zero.
2337     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2338       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2339         if (!DemandedElts[i])
2340           continue;
2341         Constant *Elt = C->getAggregateElement(i);
2342         if (!Elt || Elt->isNullValue())
2343           return false;
2344         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2345           return false;
2346       }
2347       return true;
2348     }
2349 
2350     // A global variable in address space 0 is non null unless extern weak
2351     // or an absolute symbol reference. Other address spaces may have null as a
2352     // valid address for a global, so we can't assume anything.
2353     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2354       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2355           GV->getType()->getAddressSpace() == 0)
2356         return true;
2357     } else
2358       return false;
2359   }
2360 
2361   if (auto *I = dyn_cast<Instruction>(V)) {
2362     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2363       // If the possible ranges don't contain zero, then the value is
2364       // definitely non-zero.
2365       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2366         const APInt ZeroValue(Ty->getBitWidth(), 0);
2367         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2368           return true;
2369       }
2370     }
2371   }
2372 
2373   if (isKnownNonZeroFromAssume(V, Q))
2374     return true;
2375 
2376   // Some of the tests below are recursive, so bail out if we hit the limit.
2377   if (Depth++ >= MaxAnalysisRecursionDepth)
2378     return false;
2379 
2380   // Check for pointer simplifications.
2381 
2382   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2383     // Alloca never returns null, malloc might.
2384     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2385       return true;
2386 
2387     // A byval, inalloca may not be null in a non-default addres space. A
2388     // nonnull argument is assumed never 0.
2389     if (const Argument *A = dyn_cast<Argument>(V)) {
2390       if (((A->hasPassPointeeByValueCopyAttr() &&
2391             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2392            A->hasNonNullAttr()))
2393         return true;
2394     }
2395 
2396     // A Load tagged with nonnull metadata is never null.
2397     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2398       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2399         return true;
2400 
2401     if (const auto *Call = dyn_cast<CallBase>(V)) {
2402       if (Call->isReturnNonNull())
2403         return true;
2404       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2405         return isKnownNonZero(RP, Depth, Q);
2406     }
2407   }
2408 
2409   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2410     return true;
2411 
2412   // Check for recursive pointer simplifications.
2413   if (V->getType()->isPointerTy()) {
2414     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2415     // do not alter the value, or at least not the nullness property of the
2416     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2417     //
2418     // Note that we have to take special care to avoid looking through
2419     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2420     // as casts that can alter the value, e.g., AddrSpaceCasts.
2421     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2422       return isGEPKnownNonNull(GEP, Depth, Q);
2423 
2424     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2425       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2426 
2427     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2428       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2429           Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2430         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2431   }
2432 
2433   // Similar to int2ptr above, we can look through ptr2int here if the cast
2434   // is a no-op or an extend and not a truncate.
2435   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2436     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2437         Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2438       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2439 
2440   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2441 
2442   // X | Y != 0 if X != 0 or Y != 0.
2443   Value *X = nullptr, *Y = nullptr;
2444   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2445     return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2446            isKnownNonZero(Y, DemandedElts, Depth, Q);
2447 
2448   // ext X != 0 if X != 0.
2449   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2450     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2451 
2452   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2453   // if the lowest bit is shifted off the end.
2454   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2455     // shl nuw can't remove any non-zero bits.
2456     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2457     if (Q.IIQ.hasNoUnsignedWrap(BO))
2458       return isKnownNonZero(X, Depth, Q);
2459 
2460     KnownBits Known(BitWidth);
2461     computeKnownBits(X, DemandedElts, Known, Depth, Q);
2462     if (Known.One[0])
2463       return true;
2464   }
2465   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2466   // defined if the sign bit is shifted off the end.
2467   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2468     // shr exact can only shift out zero bits.
2469     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2470     if (BO->isExact())
2471       return isKnownNonZero(X, Depth, Q);
2472 
2473     KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2474     if (Known.isNegative())
2475       return true;
2476 
2477     // If the shifter operand is a constant, and all of the bits shifted
2478     // out are known to be zero, and X is known non-zero then at least one
2479     // non-zero bit must remain.
2480     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2481       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2482       // Is there a known one in the portion not shifted out?
2483       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2484         return true;
2485       // Are all the bits to be shifted out known zero?
2486       if (Known.countMinTrailingZeros() >= ShiftVal)
2487         return isKnownNonZero(X, DemandedElts, Depth, Q);
2488     }
2489   }
2490   // div exact can only produce a zero if the dividend is zero.
2491   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2492     return isKnownNonZero(X, DemandedElts, Depth, Q);
2493   }
2494   // X + Y.
2495   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2496     KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2497     KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2498 
2499     // If X and Y are both non-negative (as signed values) then their sum is not
2500     // zero unless both X and Y are zero.
2501     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2502       if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2503           isKnownNonZero(Y, DemandedElts, Depth, Q))
2504         return true;
2505 
2506     // If X and Y are both negative (as signed values) then their sum is not
2507     // zero unless both X and Y equal INT_MIN.
2508     if (XKnown.isNegative() && YKnown.isNegative()) {
2509       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2510       // The sign bit of X is set.  If some other bit is set then X is not equal
2511       // to INT_MIN.
2512       if (XKnown.One.intersects(Mask))
2513         return true;
2514       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2515       // to INT_MIN.
2516       if (YKnown.One.intersects(Mask))
2517         return true;
2518     }
2519 
2520     // The sum of a non-negative number and a power of two is not zero.
2521     if (XKnown.isNonNegative() &&
2522         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2523       return true;
2524     if (YKnown.isNonNegative() &&
2525         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2526       return true;
2527   }
2528   // X * Y.
2529   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2530     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2531     // If X and Y are non-zero then so is X * Y as long as the multiplication
2532     // does not overflow.
2533     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2534         isKnownNonZero(X, DemandedElts, Depth, Q) &&
2535         isKnownNonZero(Y, DemandedElts, Depth, Q))
2536       return true;
2537   }
2538   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2539   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2540     if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2541         isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2542       return true;
2543   }
2544   // PHI
2545   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2546     // Try and detect a recurrence that monotonically increases from a
2547     // starting value, as these are common as induction variables.
2548     if (PN->getNumIncomingValues() == 2) {
2549       Value *Start = PN->getIncomingValue(0);
2550       Value *Induction = PN->getIncomingValue(1);
2551       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2552         std::swap(Start, Induction);
2553       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2554         if (!C->isZero() && !C->isNegative()) {
2555           ConstantInt *X;
2556           if (Q.IIQ.UseInstrInfo &&
2557               (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2558                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2559               !X->isNegative())
2560             return true;
2561         }
2562       }
2563     }
2564     // Check if all incoming values are non-zero constant.
2565     bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2566       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2567     });
2568     if (AllNonZeroConstants)
2569       return true;
2570   }
2571   // ExtractElement
2572   else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2573     const Value *Vec = EEI->getVectorOperand();
2574     const Value *Idx = EEI->getIndexOperand();
2575     auto *CIdx = dyn_cast<ConstantInt>(Idx);
2576     if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2577       unsigned NumElts = VecTy->getNumElements();
2578       APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2579       if (CIdx && CIdx->getValue().ult(NumElts))
2580         DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2581       return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2582     }
2583   }
2584   // Freeze
2585   else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2586     auto *Op = FI->getOperand(0);
2587     if (isKnownNonZero(Op, Depth, Q) &&
2588         isGuaranteedNotToBePoison(Op, Q.CxtI, Q.DT, Depth))
2589       return true;
2590   }
2591 
2592   KnownBits Known(BitWidth);
2593   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2594   return Known.One != 0;
2595 }
2596 
2597 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2598   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2599   // vector
2600   if (isa<ScalableVectorType>(V->getType()))
2601     return false;
2602 
2603   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2604   APInt DemandedElts =
2605       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2606   return isKnownNonZero(V, DemandedElts, Depth, Q);
2607 }
2608 
2609 /// Return true if V2 == V1 + X, where X is known non-zero.
2610 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2611   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2612   if (!BO || BO->getOpcode() != Instruction::Add)
2613     return false;
2614   Value *Op = nullptr;
2615   if (V2 == BO->getOperand(0))
2616     Op = BO->getOperand(1);
2617   else if (V2 == BO->getOperand(1))
2618     Op = BO->getOperand(0);
2619   else
2620     return false;
2621   return isKnownNonZero(Op, 0, Q);
2622 }
2623 
2624 /// Return true if it is known that V1 != V2.
2625 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2626   if (V1 == V2)
2627     return false;
2628   if (V1->getType() != V2->getType())
2629     // We can't look through casts yet.
2630     return false;
2631   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2632     return true;
2633 
2634   if (V1->getType()->isIntOrIntVectorTy()) {
2635     // Are any known bits in V1 contradictory to known bits in V2? If V1
2636     // has a known zero where V2 has a known one, they must not be equal.
2637     KnownBits Known1 = computeKnownBits(V1, 0, Q);
2638     KnownBits Known2 = computeKnownBits(V2, 0, Q);
2639 
2640     if (Known1.Zero.intersects(Known2.One) ||
2641         Known2.Zero.intersects(Known1.One))
2642       return true;
2643   }
2644   return false;
2645 }
2646 
2647 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2648 /// simplify operations downstream. Mask is known to be zero for bits that V
2649 /// cannot have.
2650 ///
2651 /// This function is defined on values with integer type, values with pointer
2652 /// type, and vectors of integers.  In the case
2653 /// where V is a vector, the mask, known zero, and known one values are the
2654 /// same width as the vector element, and the bit is set only if it is true
2655 /// for all of the elements in the vector.
2656 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2657                        const Query &Q) {
2658   KnownBits Known(Mask.getBitWidth());
2659   computeKnownBits(V, Known, Depth, Q);
2660   return Mask.isSubsetOf(Known.Zero);
2661 }
2662 
2663 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2664 // Returns the input and lower/upper bounds.
2665 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2666                                 const APInt *&CLow, const APInt *&CHigh) {
2667   assert(isa<Operator>(Select) &&
2668          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2669          "Input should be a Select!");
2670 
2671   const Value *LHS = nullptr, *RHS = nullptr;
2672   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2673   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2674     return false;
2675 
2676   if (!match(RHS, m_APInt(CLow)))
2677     return false;
2678 
2679   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2680   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2681   if (getInverseMinMaxFlavor(SPF) != SPF2)
2682     return false;
2683 
2684   if (!match(RHS2, m_APInt(CHigh)))
2685     return false;
2686 
2687   if (SPF == SPF_SMIN)
2688     std::swap(CLow, CHigh);
2689 
2690   In = LHS2;
2691   return CLow->sle(*CHigh);
2692 }
2693 
2694 /// For vector constants, loop over the elements and find the constant with the
2695 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2696 /// or if any element was not analyzed; otherwise, return the count for the
2697 /// element with the minimum number of sign bits.
2698 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2699                                                  const APInt &DemandedElts,
2700                                                  unsigned TyBits) {
2701   const auto *CV = dyn_cast<Constant>(V);
2702   if (!CV || !isa<FixedVectorType>(CV->getType()))
2703     return 0;
2704 
2705   unsigned MinSignBits = TyBits;
2706   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2707   for (unsigned i = 0; i != NumElts; ++i) {
2708     if (!DemandedElts[i])
2709       continue;
2710     // If we find a non-ConstantInt, bail out.
2711     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2712     if (!Elt)
2713       return 0;
2714 
2715     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2716   }
2717 
2718   return MinSignBits;
2719 }
2720 
2721 static unsigned ComputeNumSignBitsImpl(const Value *V,
2722                                        const APInt &DemandedElts,
2723                                        unsigned Depth, const Query &Q);
2724 
2725 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2726                                    unsigned Depth, const Query &Q) {
2727   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2728   assert(Result > 0 && "At least one sign bit needs to be present!");
2729   return Result;
2730 }
2731 
2732 /// Return the number of times the sign bit of the register is replicated into
2733 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2734 /// (itself), but other cases can give us information. For example, immediately
2735 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2736 /// other, so we return 3. For vectors, return the number of sign bits for the
2737 /// vector element with the minimum number of known sign bits of the demanded
2738 /// elements in the vector specified by DemandedElts.
2739 static unsigned ComputeNumSignBitsImpl(const Value *V,
2740                                        const APInt &DemandedElts,
2741                                        unsigned Depth, const Query &Q) {
2742   Type *Ty = V->getType();
2743 
2744   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2745   // vector
2746   if (isa<ScalableVectorType>(Ty))
2747     return 1;
2748 
2749 #ifndef NDEBUG
2750   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2751 
2752   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2753     assert(
2754         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2755         "DemandedElt width should equal the fixed vector number of elements");
2756   } else {
2757     assert(DemandedElts == APInt(1, 1) &&
2758            "DemandedElt width should be 1 for scalars");
2759   }
2760 #endif
2761 
2762   // We return the minimum number of sign bits that are guaranteed to be present
2763   // in V, so for undef we have to conservatively return 1.  We don't have the
2764   // same behavior for poison though -- that's a FIXME today.
2765 
2766   Type *ScalarTy = Ty->getScalarType();
2767   unsigned TyBits = ScalarTy->isPointerTy() ?
2768     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2769     Q.DL.getTypeSizeInBits(ScalarTy);
2770 
2771   unsigned Tmp, Tmp2;
2772   unsigned FirstAnswer = 1;
2773 
2774   // Note that ConstantInt is handled by the general computeKnownBits case
2775   // below.
2776 
2777   if (Depth == MaxAnalysisRecursionDepth)
2778     return 1;
2779 
2780   if (auto *U = dyn_cast<Operator>(V)) {
2781     switch (Operator::getOpcode(V)) {
2782     default: break;
2783     case Instruction::SExt:
2784       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2785       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2786 
2787     case Instruction::SDiv: {
2788       const APInt *Denominator;
2789       // sdiv X, C -> adds log(C) sign bits.
2790       if (match(U->getOperand(1), m_APInt(Denominator))) {
2791 
2792         // Ignore non-positive denominator.
2793         if (!Denominator->isStrictlyPositive())
2794           break;
2795 
2796         // Calculate the incoming numerator bits.
2797         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2798 
2799         // Add floor(log(C)) bits to the numerator bits.
2800         return std::min(TyBits, NumBits + Denominator->logBase2());
2801       }
2802       break;
2803     }
2804 
2805     case Instruction::SRem: {
2806       const APInt *Denominator;
2807       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2808       // positive constant.  This let us put a lower bound on the number of sign
2809       // bits.
2810       if (match(U->getOperand(1), m_APInt(Denominator))) {
2811 
2812         // Ignore non-positive denominator.
2813         if (!Denominator->isStrictlyPositive())
2814           break;
2815 
2816         // Calculate the incoming numerator bits. SRem by a positive constant
2817         // can't lower the number of sign bits.
2818         unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2819 
2820         // Calculate the leading sign bit constraints by examining the
2821         // denominator.  Given that the denominator is positive, there are two
2822         // cases:
2823         //
2824         //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
2825         //     (1 << ceilLogBase2(C)).
2826         //
2827         //  2. the numerator is negative. Then the result range is (-C,0] and
2828         //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2829         //
2830         // Thus a lower bound on the number of sign bits is `TyBits -
2831         // ceilLogBase2(C)`.
2832 
2833         unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2834         return std::max(NumrBits, ResBits);
2835       }
2836       break;
2837     }
2838 
2839     case Instruction::AShr: {
2840       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2841       // ashr X, C   -> adds C sign bits.  Vectors too.
2842       const APInt *ShAmt;
2843       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2844         if (ShAmt->uge(TyBits))
2845           break; // Bad shift.
2846         unsigned ShAmtLimited = ShAmt->getZExtValue();
2847         Tmp += ShAmtLimited;
2848         if (Tmp > TyBits) Tmp = TyBits;
2849       }
2850       return Tmp;
2851     }
2852     case Instruction::Shl: {
2853       const APInt *ShAmt;
2854       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2855         // shl destroys sign bits.
2856         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2857         if (ShAmt->uge(TyBits) ||   // Bad shift.
2858             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2859         Tmp2 = ShAmt->getZExtValue();
2860         return Tmp - Tmp2;
2861       }
2862       break;
2863     }
2864     case Instruction::And:
2865     case Instruction::Or:
2866     case Instruction::Xor: // NOT is handled here.
2867       // Logical binary ops preserve the number of sign bits at the worst.
2868       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2869       if (Tmp != 1) {
2870         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2871         FirstAnswer = std::min(Tmp, Tmp2);
2872         // We computed what we know about the sign bits as our first
2873         // answer. Now proceed to the generic code that uses
2874         // computeKnownBits, and pick whichever answer is better.
2875       }
2876       break;
2877 
2878     case Instruction::Select: {
2879       // If we have a clamp pattern, we know that the number of sign bits will
2880       // be the minimum of the clamp min/max range.
2881       const Value *X;
2882       const APInt *CLow, *CHigh;
2883       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2884         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2885 
2886       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2887       if (Tmp == 1) break;
2888       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2889       return std::min(Tmp, Tmp2);
2890     }
2891 
2892     case Instruction::Add:
2893       // Add can have at most one carry bit.  Thus we know that the output
2894       // is, at worst, one more bit than the inputs.
2895       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2896       if (Tmp == 1) break;
2897 
2898       // Special case decrementing a value (ADD X, -1):
2899       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2900         if (CRHS->isAllOnesValue()) {
2901           KnownBits Known(TyBits);
2902           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2903 
2904           // If the input is known to be 0 or 1, the output is 0/-1, which is
2905           // all sign bits set.
2906           if ((Known.Zero | 1).isAllOnesValue())
2907             return TyBits;
2908 
2909           // If we are subtracting one from a positive number, there is no carry
2910           // out of the result.
2911           if (Known.isNonNegative())
2912             return Tmp;
2913         }
2914 
2915       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2916       if (Tmp2 == 1) break;
2917       return std::min(Tmp, Tmp2) - 1;
2918 
2919     case Instruction::Sub:
2920       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2921       if (Tmp2 == 1) break;
2922 
2923       // Handle NEG.
2924       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2925         if (CLHS->isNullValue()) {
2926           KnownBits Known(TyBits);
2927           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2928           // If the input is known to be 0 or 1, the output is 0/-1, which is
2929           // all sign bits set.
2930           if ((Known.Zero | 1).isAllOnesValue())
2931             return TyBits;
2932 
2933           // If the input is known to be positive (the sign bit is known clear),
2934           // the output of the NEG has the same number of sign bits as the
2935           // input.
2936           if (Known.isNonNegative())
2937             return Tmp2;
2938 
2939           // Otherwise, we treat this like a SUB.
2940         }
2941 
2942       // Sub can have at most one carry bit.  Thus we know that the output
2943       // is, at worst, one more bit than the inputs.
2944       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2945       if (Tmp == 1) break;
2946       return std::min(Tmp, Tmp2) - 1;
2947 
2948     case Instruction::Mul: {
2949       // The output of the Mul can be at most twice the valid bits in the
2950       // inputs.
2951       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2952       if (SignBitsOp0 == 1) break;
2953       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2954       if (SignBitsOp1 == 1) break;
2955       unsigned OutValidBits =
2956           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2957       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2958     }
2959 
2960     case Instruction::PHI: {
2961       const PHINode *PN = cast<PHINode>(U);
2962       unsigned NumIncomingValues = PN->getNumIncomingValues();
2963       // Don't analyze large in-degree PHIs.
2964       if (NumIncomingValues > 4) break;
2965       // Unreachable blocks may have zero-operand PHI nodes.
2966       if (NumIncomingValues == 0) break;
2967 
2968       // Take the minimum of all incoming values.  This can't infinitely loop
2969       // because of our depth threshold.
2970       Query RecQ = Q;
2971       Tmp = TyBits;
2972       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
2973         if (Tmp == 1) return Tmp;
2974         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
2975         Tmp = std::min(
2976             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
2977       }
2978       return Tmp;
2979     }
2980 
2981     case Instruction::Trunc:
2982       // FIXME: it's tricky to do anything useful for this, but it is an
2983       // important case for targets like X86.
2984       break;
2985 
2986     case Instruction::ExtractElement:
2987       // Look through extract element. At the moment we keep this simple and
2988       // skip tracking the specific element. But at least we might find
2989       // information valid for all elements of the vector (for example if vector
2990       // is sign extended, shifted, etc).
2991       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2992 
2993     case Instruction::ShuffleVector: {
2994       // Collect the minimum number of sign bits that are shared by every vector
2995       // element referenced by the shuffle.
2996       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
2997       if (!Shuf) {
2998         // FIXME: Add support for shufflevector constant expressions.
2999         return 1;
3000       }
3001       APInt DemandedLHS, DemandedRHS;
3002       // For undef elements, we don't know anything about the common state of
3003       // the shuffle result.
3004       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3005         return 1;
3006       Tmp = std::numeric_limits<unsigned>::max();
3007       if (!!DemandedLHS) {
3008         const Value *LHS = Shuf->getOperand(0);
3009         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3010       }
3011       // If we don't know anything, early out and try computeKnownBits
3012       // fall-back.
3013       if (Tmp == 1)
3014         break;
3015       if (!!DemandedRHS) {
3016         const Value *RHS = Shuf->getOperand(1);
3017         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3018         Tmp = std::min(Tmp, Tmp2);
3019       }
3020       // If we don't know anything, early out and try computeKnownBits
3021       // fall-back.
3022       if (Tmp == 1)
3023         break;
3024       assert(Tmp <= Ty->getScalarSizeInBits() &&
3025              "Failed to determine minimum sign bits");
3026       return Tmp;
3027     }
3028     case Instruction::Call: {
3029       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3030         switch (II->getIntrinsicID()) {
3031         default: break;
3032         case Intrinsic::abs:
3033           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3034           if (Tmp == 1) break;
3035 
3036           // Absolute value reduces number of sign bits by at most 1.
3037           return Tmp - 1;
3038         }
3039       }
3040     }
3041     }
3042   }
3043 
3044   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3045   // use this information.
3046 
3047   // If we can examine all elements of a vector constant successfully, we're
3048   // done (we can't do any better than that). If not, keep trying.
3049   if (unsigned VecSignBits =
3050           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3051     return VecSignBits;
3052 
3053   KnownBits Known(TyBits);
3054   computeKnownBits(V, DemandedElts, Known, Depth, Q);
3055 
3056   // If we know that the sign bit is either zero or one, determine the number of
3057   // identical bits in the top of the input value.
3058   return std::max(FirstAnswer, Known.countMinSignBits());
3059 }
3060 
3061 /// This function computes the integer multiple of Base that equals V.
3062 /// If successful, it returns true and returns the multiple in
3063 /// Multiple. If unsuccessful, it returns false. It looks
3064 /// through SExt instructions only if LookThroughSExt is true.
3065 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3066                            bool LookThroughSExt, unsigned Depth) {
3067   assert(V && "No Value?");
3068   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3069   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
3070 
3071   Type *T = V->getType();
3072 
3073   ConstantInt *CI = dyn_cast<ConstantInt>(V);
3074 
3075   if (Base == 0)
3076     return false;
3077 
3078   if (Base == 1) {
3079     Multiple = V;
3080     return true;
3081   }
3082 
3083   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3084   Constant *BaseVal = ConstantInt::get(T, Base);
3085   if (CO && CO == BaseVal) {
3086     // Multiple is 1.
3087     Multiple = ConstantInt::get(T, 1);
3088     return true;
3089   }
3090 
3091   if (CI && CI->getZExtValue() % Base == 0) {
3092     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3093     return true;
3094   }
3095 
3096   if (Depth == MaxAnalysisRecursionDepth) return false;
3097 
3098   Operator *I = dyn_cast<Operator>(V);
3099   if (!I) return false;
3100 
3101   switch (I->getOpcode()) {
3102   default: break;
3103   case Instruction::SExt:
3104     if (!LookThroughSExt) return false;
3105     // otherwise fall through to ZExt
3106     LLVM_FALLTHROUGH;
3107   case Instruction::ZExt:
3108     return ComputeMultiple(I->getOperand(0), Base, Multiple,
3109                            LookThroughSExt, Depth+1);
3110   case Instruction::Shl:
3111   case Instruction::Mul: {
3112     Value *Op0 = I->getOperand(0);
3113     Value *Op1 = I->getOperand(1);
3114 
3115     if (I->getOpcode() == Instruction::Shl) {
3116       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3117       if (!Op1CI) return false;
3118       // Turn Op0 << Op1 into Op0 * 2^Op1
3119       APInt Op1Int = Op1CI->getValue();
3120       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3121       APInt API(Op1Int.getBitWidth(), 0);
3122       API.setBit(BitToSet);
3123       Op1 = ConstantInt::get(V->getContext(), API);
3124     }
3125 
3126     Value *Mul0 = nullptr;
3127     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3128       if (Constant *Op1C = dyn_cast<Constant>(Op1))
3129         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3130           if (Op1C->getType()->getPrimitiveSizeInBits() <
3131               MulC->getType()->getPrimitiveSizeInBits())
3132             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3133           if (Op1C->getType()->getPrimitiveSizeInBits() >
3134               MulC->getType()->getPrimitiveSizeInBits())
3135             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3136 
3137           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3138           Multiple = ConstantExpr::getMul(MulC, Op1C);
3139           return true;
3140         }
3141 
3142       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3143         if (Mul0CI->getValue() == 1) {
3144           // V == Base * Op1, so return Op1
3145           Multiple = Op1;
3146           return true;
3147         }
3148     }
3149 
3150     Value *Mul1 = nullptr;
3151     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3152       if (Constant *Op0C = dyn_cast<Constant>(Op0))
3153         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3154           if (Op0C->getType()->getPrimitiveSizeInBits() <
3155               MulC->getType()->getPrimitiveSizeInBits())
3156             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3157           if (Op0C->getType()->getPrimitiveSizeInBits() >
3158               MulC->getType()->getPrimitiveSizeInBits())
3159             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3160 
3161           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3162           Multiple = ConstantExpr::getMul(MulC, Op0C);
3163           return true;
3164         }
3165 
3166       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3167         if (Mul1CI->getValue() == 1) {
3168           // V == Base * Op0, so return Op0
3169           Multiple = Op0;
3170           return true;
3171         }
3172     }
3173   }
3174   }
3175 
3176   // We could not determine if V is a multiple of Base.
3177   return false;
3178 }
3179 
3180 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3181                                             const TargetLibraryInfo *TLI) {
3182   const Function *F = CB.getCalledFunction();
3183   if (!F)
3184     return Intrinsic::not_intrinsic;
3185 
3186   if (F->isIntrinsic())
3187     return F->getIntrinsicID();
3188 
3189   // We are going to infer semantics of a library function based on mapping it
3190   // to an LLVM intrinsic. Check that the library function is available from
3191   // this callbase and in this environment.
3192   LibFunc Func;
3193   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3194       !CB.onlyReadsMemory())
3195     return Intrinsic::not_intrinsic;
3196 
3197   switch (Func) {
3198   default:
3199     break;
3200   case LibFunc_sin:
3201   case LibFunc_sinf:
3202   case LibFunc_sinl:
3203     return Intrinsic::sin;
3204   case LibFunc_cos:
3205   case LibFunc_cosf:
3206   case LibFunc_cosl:
3207     return Intrinsic::cos;
3208   case LibFunc_exp:
3209   case LibFunc_expf:
3210   case LibFunc_expl:
3211     return Intrinsic::exp;
3212   case LibFunc_exp2:
3213   case LibFunc_exp2f:
3214   case LibFunc_exp2l:
3215     return Intrinsic::exp2;
3216   case LibFunc_log:
3217   case LibFunc_logf:
3218   case LibFunc_logl:
3219     return Intrinsic::log;
3220   case LibFunc_log10:
3221   case LibFunc_log10f:
3222   case LibFunc_log10l:
3223     return Intrinsic::log10;
3224   case LibFunc_log2:
3225   case LibFunc_log2f:
3226   case LibFunc_log2l:
3227     return Intrinsic::log2;
3228   case LibFunc_fabs:
3229   case LibFunc_fabsf:
3230   case LibFunc_fabsl:
3231     return Intrinsic::fabs;
3232   case LibFunc_fmin:
3233   case LibFunc_fminf:
3234   case LibFunc_fminl:
3235     return Intrinsic::minnum;
3236   case LibFunc_fmax:
3237   case LibFunc_fmaxf:
3238   case LibFunc_fmaxl:
3239     return Intrinsic::maxnum;
3240   case LibFunc_copysign:
3241   case LibFunc_copysignf:
3242   case LibFunc_copysignl:
3243     return Intrinsic::copysign;
3244   case LibFunc_floor:
3245   case LibFunc_floorf:
3246   case LibFunc_floorl:
3247     return Intrinsic::floor;
3248   case LibFunc_ceil:
3249   case LibFunc_ceilf:
3250   case LibFunc_ceill:
3251     return Intrinsic::ceil;
3252   case LibFunc_trunc:
3253   case LibFunc_truncf:
3254   case LibFunc_truncl:
3255     return Intrinsic::trunc;
3256   case LibFunc_rint:
3257   case LibFunc_rintf:
3258   case LibFunc_rintl:
3259     return Intrinsic::rint;
3260   case LibFunc_nearbyint:
3261   case LibFunc_nearbyintf:
3262   case LibFunc_nearbyintl:
3263     return Intrinsic::nearbyint;
3264   case LibFunc_round:
3265   case LibFunc_roundf:
3266   case LibFunc_roundl:
3267     return Intrinsic::round;
3268   case LibFunc_roundeven:
3269   case LibFunc_roundevenf:
3270   case LibFunc_roundevenl:
3271     return Intrinsic::roundeven;
3272   case LibFunc_pow:
3273   case LibFunc_powf:
3274   case LibFunc_powl:
3275     return Intrinsic::pow;
3276   case LibFunc_sqrt:
3277   case LibFunc_sqrtf:
3278   case LibFunc_sqrtl:
3279     return Intrinsic::sqrt;
3280   }
3281 
3282   return Intrinsic::not_intrinsic;
3283 }
3284 
3285 /// Return true if we can prove that the specified FP value is never equal to
3286 /// -0.0.
3287 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3288 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3289 ///       the same as +0.0 in floating-point ops.
3290 ///
3291 /// NOTE: this function will need to be revisited when we support non-default
3292 /// rounding modes!
3293 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3294                                 unsigned Depth) {
3295   if (auto *CFP = dyn_cast<ConstantFP>(V))
3296     return !CFP->getValueAPF().isNegZero();
3297 
3298   if (Depth == MaxAnalysisRecursionDepth)
3299     return false;
3300 
3301   auto *Op = dyn_cast<Operator>(V);
3302   if (!Op)
3303     return false;
3304 
3305   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3306   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3307     return true;
3308 
3309   // sitofp and uitofp turn into +0.0 for zero.
3310   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3311     return true;
3312 
3313   if (auto *Call = dyn_cast<CallInst>(Op)) {
3314     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3315     switch (IID) {
3316     default:
3317       break;
3318     // sqrt(-0.0) = -0.0, no other negative results are possible.
3319     case Intrinsic::sqrt:
3320     case Intrinsic::canonicalize:
3321       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3322     // fabs(x) != -0.0
3323     case Intrinsic::fabs:
3324       return true;
3325     }
3326   }
3327 
3328   return false;
3329 }
3330 
3331 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3332 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3333 /// bit despite comparing equal.
3334 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3335                                             const TargetLibraryInfo *TLI,
3336                                             bool SignBitOnly,
3337                                             unsigned Depth) {
3338   // TODO: This function does not do the right thing when SignBitOnly is true
3339   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3340   // which flips the sign bits of NaNs.  See
3341   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3342 
3343   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3344     return !CFP->getValueAPF().isNegative() ||
3345            (!SignBitOnly && CFP->getValueAPF().isZero());
3346   }
3347 
3348   // Handle vector of constants.
3349   if (auto *CV = dyn_cast<Constant>(V)) {
3350     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3351       unsigned NumElts = CVFVTy->getNumElements();
3352       for (unsigned i = 0; i != NumElts; ++i) {
3353         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3354         if (!CFP)
3355           return false;
3356         if (CFP->getValueAPF().isNegative() &&
3357             (SignBitOnly || !CFP->getValueAPF().isZero()))
3358           return false;
3359       }
3360 
3361       // All non-negative ConstantFPs.
3362       return true;
3363     }
3364   }
3365 
3366   if (Depth == MaxAnalysisRecursionDepth)
3367     return false;
3368 
3369   const Operator *I = dyn_cast<Operator>(V);
3370   if (!I)
3371     return false;
3372 
3373   switch (I->getOpcode()) {
3374   default:
3375     break;
3376   // Unsigned integers are always nonnegative.
3377   case Instruction::UIToFP:
3378     return true;
3379   case Instruction::FMul:
3380   case Instruction::FDiv:
3381     // X * X is always non-negative or a NaN.
3382     // X / X is always exactly 1.0 or a NaN.
3383     if (I->getOperand(0) == I->getOperand(1) &&
3384         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3385       return true;
3386 
3387     LLVM_FALLTHROUGH;
3388   case Instruction::FAdd:
3389   case Instruction::FRem:
3390     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3391                                            Depth + 1) &&
3392            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3393                                            Depth + 1);
3394   case Instruction::Select:
3395     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3396                                            Depth + 1) &&
3397            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3398                                            Depth + 1);
3399   case Instruction::FPExt:
3400   case Instruction::FPTrunc:
3401     // Widening/narrowing never change sign.
3402     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3403                                            Depth + 1);
3404   case Instruction::ExtractElement:
3405     // Look through extract element. At the moment we keep this simple and skip
3406     // tracking the specific element. But at least we might find information
3407     // valid for all elements of the vector.
3408     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3409                                            Depth + 1);
3410   case Instruction::Call:
3411     const auto *CI = cast<CallInst>(I);
3412     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3413     switch (IID) {
3414     default:
3415       break;
3416     case Intrinsic::maxnum: {
3417       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3418       auto isPositiveNum = [&](Value *V) {
3419         if (SignBitOnly) {
3420           // With SignBitOnly, this is tricky because the result of
3421           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3422           // a constant strictly greater than 0.0.
3423           const APFloat *C;
3424           return match(V, m_APFloat(C)) &&
3425                  *C > APFloat::getZero(C->getSemantics());
3426         }
3427 
3428         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3429         // maxnum can't be ordered-less-than-zero.
3430         return isKnownNeverNaN(V, TLI) &&
3431                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3432       };
3433 
3434       // TODO: This could be improved. We could also check that neither operand
3435       //       has its sign bit set (and at least 1 is not-NAN?).
3436       return isPositiveNum(V0) || isPositiveNum(V1);
3437     }
3438 
3439     case Intrinsic::maximum:
3440       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3441                                              Depth + 1) ||
3442              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3443                                              Depth + 1);
3444     case Intrinsic::minnum:
3445     case Intrinsic::minimum:
3446       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3447                                              Depth + 1) &&
3448              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3449                                              Depth + 1);
3450     case Intrinsic::exp:
3451     case Intrinsic::exp2:
3452     case Intrinsic::fabs:
3453       return true;
3454 
3455     case Intrinsic::sqrt:
3456       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3457       if (!SignBitOnly)
3458         return true;
3459       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3460                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3461 
3462     case Intrinsic::powi:
3463       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3464         // powi(x,n) is non-negative if n is even.
3465         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3466           return true;
3467       }
3468       // TODO: This is not correct.  Given that exp is an integer, here are the
3469       // ways that pow can return a negative value:
3470       //
3471       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3472       //   pow(-0, exp)   --> -inf if exp is negative odd.
3473       //   pow(-0, exp)   --> -0 if exp is positive odd.
3474       //   pow(-inf, exp) --> -0 if exp is negative odd.
3475       //   pow(-inf, exp) --> -inf if exp is positive odd.
3476       //
3477       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3478       // but we must return false if x == -0.  Unfortunately we do not currently
3479       // have a way of expressing this constraint.  See details in
3480       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3481       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3482                                              Depth + 1);
3483 
3484     case Intrinsic::fma:
3485     case Intrinsic::fmuladd:
3486       // x*x+y is non-negative if y is non-negative.
3487       return I->getOperand(0) == I->getOperand(1) &&
3488              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3489              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3490                                              Depth + 1);
3491     }
3492     break;
3493   }
3494   return false;
3495 }
3496 
3497 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3498                                        const TargetLibraryInfo *TLI) {
3499   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3500 }
3501 
3502 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3503   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3504 }
3505 
3506 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3507                                 unsigned Depth) {
3508   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3509 
3510   // If we're told that infinities won't happen, assume they won't.
3511   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3512     if (FPMathOp->hasNoInfs())
3513       return true;
3514 
3515   // Handle scalar constants.
3516   if (auto *CFP = dyn_cast<ConstantFP>(V))
3517     return !CFP->isInfinity();
3518 
3519   if (Depth == MaxAnalysisRecursionDepth)
3520     return false;
3521 
3522   if (auto *Inst = dyn_cast<Instruction>(V)) {
3523     switch (Inst->getOpcode()) {
3524     case Instruction::Select: {
3525       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3526              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3527     }
3528     case Instruction::SIToFP:
3529     case Instruction::UIToFP: {
3530       // Get width of largest magnitude integer (remove a bit if signed).
3531       // This still works for a signed minimum value because the largest FP
3532       // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3533       int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3534       if (Inst->getOpcode() == Instruction::SIToFP)
3535         --IntSize;
3536 
3537       // If the exponent of the largest finite FP value can hold the largest
3538       // integer, the result of the cast must be finite.
3539       Type *FPTy = Inst->getType()->getScalarType();
3540       return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3541     }
3542     default:
3543       break;
3544     }
3545   }
3546 
3547   // try to handle fixed width vector constants
3548   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3549   if (VFVTy && isa<Constant>(V)) {
3550     // For vectors, verify that each element is not infinity.
3551     unsigned NumElts = VFVTy->getNumElements();
3552     for (unsigned i = 0; i != NumElts; ++i) {
3553       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3554       if (!Elt)
3555         return false;
3556       if (isa<UndefValue>(Elt))
3557         continue;
3558       auto *CElt = dyn_cast<ConstantFP>(Elt);
3559       if (!CElt || CElt->isInfinity())
3560         return false;
3561     }
3562     // All elements were confirmed non-infinity or undefined.
3563     return true;
3564   }
3565 
3566   // was not able to prove that V never contains infinity
3567   return false;
3568 }
3569 
3570 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3571                            unsigned Depth) {
3572   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3573 
3574   // If we're told that NaNs won't happen, assume they won't.
3575   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3576     if (FPMathOp->hasNoNaNs())
3577       return true;
3578 
3579   // Handle scalar constants.
3580   if (auto *CFP = dyn_cast<ConstantFP>(V))
3581     return !CFP->isNaN();
3582 
3583   if (Depth == MaxAnalysisRecursionDepth)
3584     return false;
3585 
3586   if (auto *Inst = dyn_cast<Instruction>(V)) {
3587     switch (Inst->getOpcode()) {
3588     case Instruction::FAdd:
3589     case Instruction::FSub:
3590       // Adding positive and negative infinity produces NaN.
3591       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3592              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3593              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3594               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3595 
3596     case Instruction::FMul:
3597       // Zero multiplied with infinity produces NaN.
3598       // FIXME: If neither side can be zero fmul never produces NaN.
3599       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3600              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3601              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3602              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3603 
3604     case Instruction::FDiv:
3605     case Instruction::FRem:
3606       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3607       return false;
3608 
3609     case Instruction::Select: {
3610       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3611              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3612     }
3613     case Instruction::SIToFP:
3614     case Instruction::UIToFP:
3615       return true;
3616     case Instruction::FPTrunc:
3617     case Instruction::FPExt:
3618       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3619     default:
3620       break;
3621     }
3622   }
3623 
3624   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3625     switch (II->getIntrinsicID()) {
3626     case Intrinsic::canonicalize:
3627     case Intrinsic::fabs:
3628     case Intrinsic::copysign:
3629     case Intrinsic::exp:
3630     case Intrinsic::exp2:
3631     case Intrinsic::floor:
3632     case Intrinsic::ceil:
3633     case Intrinsic::trunc:
3634     case Intrinsic::rint:
3635     case Intrinsic::nearbyint:
3636     case Intrinsic::round:
3637     case Intrinsic::roundeven:
3638       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3639     case Intrinsic::sqrt:
3640       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3641              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3642     case Intrinsic::minnum:
3643     case Intrinsic::maxnum:
3644       // If either operand is not NaN, the result is not NaN.
3645       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3646              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3647     default:
3648       return false;
3649     }
3650   }
3651 
3652   // Try to handle fixed width vector constants
3653   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3654   if (VFVTy && isa<Constant>(V)) {
3655     // For vectors, verify that each element is not NaN.
3656     unsigned NumElts = VFVTy->getNumElements();
3657     for (unsigned i = 0; i != NumElts; ++i) {
3658       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3659       if (!Elt)
3660         return false;
3661       if (isa<UndefValue>(Elt))
3662         continue;
3663       auto *CElt = dyn_cast<ConstantFP>(Elt);
3664       if (!CElt || CElt->isNaN())
3665         return false;
3666     }
3667     // All elements were confirmed not-NaN or undefined.
3668     return true;
3669   }
3670 
3671   // Was not able to prove that V never contains NaN
3672   return false;
3673 }
3674 
3675 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3676 
3677   // All byte-wide stores are splatable, even of arbitrary variables.
3678   if (V->getType()->isIntegerTy(8))
3679     return V;
3680 
3681   LLVMContext &Ctx = V->getContext();
3682 
3683   // Undef don't care.
3684   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3685   if (isa<UndefValue>(V))
3686     return UndefInt8;
3687 
3688   // Return Undef for zero-sized type.
3689   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3690     return UndefInt8;
3691 
3692   Constant *C = dyn_cast<Constant>(V);
3693   if (!C) {
3694     // Conceptually, we could handle things like:
3695     //   %a = zext i8 %X to i16
3696     //   %b = shl i16 %a, 8
3697     //   %c = or i16 %a, %b
3698     // but until there is an example that actually needs this, it doesn't seem
3699     // worth worrying about.
3700     return nullptr;
3701   }
3702 
3703   // Handle 'null' ConstantArrayZero etc.
3704   if (C->isNullValue())
3705     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3706 
3707   // Constant floating-point values can be handled as integer values if the
3708   // corresponding integer value is "byteable".  An important case is 0.0.
3709   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3710     Type *Ty = nullptr;
3711     if (CFP->getType()->isHalfTy())
3712       Ty = Type::getInt16Ty(Ctx);
3713     else if (CFP->getType()->isFloatTy())
3714       Ty = Type::getInt32Ty(Ctx);
3715     else if (CFP->getType()->isDoubleTy())
3716       Ty = Type::getInt64Ty(Ctx);
3717     // Don't handle long double formats, which have strange constraints.
3718     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3719               : nullptr;
3720   }
3721 
3722   // We can handle constant integers that are multiple of 8 bits.
3723   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3724     if (CI->getBitWidth() % 8 == 0) {
3725       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3726       if (!CI->getValue().isSplat(8))
3727         return nullptr;
3728       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3729     }
3730   }
3731 
3732   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3733     if (CE->getOpcode() == Instruction::IntToPtr) {
3734       auto PS = DL.getPointerSizeInBits(
3735           cast<PointerType>(CE->getType())->getAddressSpace());
3736       return isBytewiseValue(
3737           ConstantExpr::getIntegerCast(CE->getOperand(0),
3738                                        Type::getIntNTy(Ctx, PS), false),
3739           DL);
3740     }
3741   }
3742 
3743   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3744     if (LHS == RHS)
3745       return LHS;
3746     if (!LHS || !RHS)
3747       return nullptr;
3748     if (LHS == UndefInt8)
3749       return RHS;
3750     if (RHS == UndefInt8)
3751       return LHS;
3752     return nullptr;
3753   };
3754 
3755   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3756     Value *Val = UndefInt8;
3757     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3758       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3759         return nullptr;
3760     return Val;
3761   }
3762 
3763   if (isa<ConstantAggregate>(C)) {
3764     Value *Val = UndefInt8;
3765     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3766       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3767         return nullptr;
3768     return Val;
3769   }
3770 
3771   // Don't try to handle the handful of other constants.
3772   return nullptr;
3773 }
3774 
3775 // This is the recursive version of BuildSubAggregate. It takes a few different
3776 // arguments. Idxs is the index within the nested struct From that we are
3777 // looking at now (which is of type IndexedType). IdxSkip is the number of
3778 // indices from Idxs that should be left out when inserting into the resulting
3779 // struct. To is the result struct built so far, new insertvalue instructions
3780 // build on that.
3781 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3782                                 SmallVectorImpl<unsigned> &Idxs,
3783                                 unsigned IdxSkip,
3784                                 Instruction *InsertBefore) {
3785   StructType *STy = dyn_cast<StructType>(IndexedType);
3786   if (STy) {
3787     // Save the original To argument so we can modify it
3788     Value *OrigTo = To;
3789     // General case, the type indexed by Idxs is a struct
3790     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3791       // Process each struct element recursively
3792       Idxs.push_back(i);
3793       Value *PrevTo = To;
3794       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3795                              InsertBefore);
3796       Idxs.pop_back();
3797       if (!To) {
3798         // Couldn't find any inserted value for this index? Cleanup
3799         while (PrevTo != OrigTo) {
3800           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3801           PrevTo = Del->getAggregateOperand();
3802           Del->eraseFromParent();
3803         }
3804         // Stop processing elements
3805         break;
3806       }
3807     }
3808     // If we successfully found a value for each of our subaggregates
3809     if (To)
3810       return To;
3811   }
3812   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3813   // the struct's elements had a value that was inserted directly. In the latter
3814   // case, perhaps we can't determine each of the subelements individually, but
3815   // we might be able to find the complete struct somewhere.
3816 
3817   // Find the value that is at that particular spot
3818   Value *V = FindInsertedValue(From, Idxs);
3819 
3820   if (!V)
3821     return nullptr;
3822 
3823   // Insert the value in the new (sub) aggregate
3824   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3825                                  "tmp", InsertBefore);
3826 }
3827 
3828 // This helper takes a nested struct and extracts a part of it (which is again a
3829 // struct) into a new value. For example, given the struct:
3830 // { a, { b, { c, d }, e } }
3831 // and the indices "1, 1" this returns
3832 // { c, d }.
3833 //
3834 // It does this by inserting an insertvalue for each element in the resulting
3835 // struct, as opposed to just inserting a single struct. This will only work if
3836 // each of the elements of the substruct are known (ie, inserted into From by an
3837 // insertvalue instruction somewhere).
3838 //
3839 // All inserted insertvalue instructions are inserted before InsertBefore
3840 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3841                                 Instruction *InsertBefore) {
3842   assert(InsertBefore && "Must have someplace to insert!");
3843   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3844                                                              idx_range);
3845   Value *To = UndefValue::get(IndexedType);
3846   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3847   unsigned IdxSkip = Idxs.size();
3848 
3849   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3850 }
3851 
3852 /// Given an aggregate and a sequence of indices, see if the scalar value
3853 /// indexed is already around as a register, for example if it was inserted
3854 /// directly into the aggregate.
3855 ///
3856 /// If InsertBefore is not null, this function will duplicate (modified)
3857 /// insertvalues when a part of a nested struct is extracted.
3858 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3859                                Instruction *InsertBefore) {
3860   // Nothing to index? Just return V then (this is useful at the end of our
3861   // recursion).
3862   if (idx_range.empty())
3863     return V;
3864   // We have indices, so V should have an indexable type.
3865   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3866          "Not looking at a struct or array?");
3867   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3868          "Invalid indices for type?");
3869 
3870   if (Constant *C = dyn_cast<Constant>(V)) {
3871     C = C->getAggregateElement(idx_range[0]);
3872     if (!C) return nullptr;
3873     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3874   }
3875 
3876   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3877     // Loop the indices for the insertvalue instruction in parallel with the
3878     // requested indices
3879     const unsigned *req_idx = idx_range.begin();
3880     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3881          i != e; ++i, ++req_idx) {
3882       if (req_idx == idx_range.end()) {
3883         // We can't handle this without inserting insertvalues
3884         if (!InsertBefore)
3885           return nullptr;
3886 
3887         // The requested index identifies a part of a nested aggregate. Handle
3888         // this specially. For example,
3889         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3890         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3891         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3892         // This can be changed into
3893         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3894         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3895         // which allows the unused 0,0 element from the nested struct to be
3896         // removed.
3897         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3898                                  InsertBefore);
3899       }
3900 
3901       // This insert value inserts something else than what we are looking for.
3902       // See if the (aggregate) value inserted into has the value we are
3903       // looking for, then.
3904       if (*req_idx != *i)
3905         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3906                                  InsertBefore);
3907     }
3908     // If we end up here, the indices of the insertvalue match with those
3909     // requested (though possibly only partially). Now we recursively look at
3910     // the inserted value, passing any remaining indices.
3911     return FindInsertedValue(I->getInsertedValueOperand(),
3912                              makeArrayRef(req_idx, idx_range.end()),
3913                              InsertBefore);
3914   }
3915 
3916   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3917     // If we're extracting a value from an aggregate that was extracted from
3918     // something else, we can extract from that something else directly instead.
3919     // However, we will need to chain I's indices with the requested indices.
3920 
3921     // Calculate the number of indices required
3922     unsigned size = I->getNumIndices() + idx_range.size();
3923     // Allocate some space to put the new indices in
3924     SmallVector<unsigned, 5> Idxs;
3925     Idxs.reserve(size);
3926     // Add indices from the extract value instruction
3927     Idxs.append(I->idx_begin(), I->idx_end());
3928 
3929     // Add requested indices
3930     Idxs.append(idx_range.begin(), idx_range.end());
3931 
3932     assert(Idxs.size() == size
3933            && "Number of indices added not correct?");
3934 
3935     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3936   }
3937   // Otherwise, we don't know (such as, extracting from a function return value
3938   // or load instruction)
3939   return nullptr;
3940 }
3941 
3942 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3943                                        unsigned CharSize) {
3944   // Make sure the GEP has exactly three arguments.
3945   if (GEP->getNumOperands() != 3)
3946     return false;
3947 
3948   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3949   // CharSize.
3950   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3951   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3952     return false;
3953 
3954   // Check to make sure that the first operand of the GEP is an integer and
3955   // has value 0 so that we are sure we're indexing into the initializer.
3956   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3957   if (!FirstIdx || !FirstIdx->isZero())
3958     return false;
3959 
3960   return true;
3961 }
3962 
3963 bool llvm::getConstantDataArrayInfo(const Value *V,
3964                                     ConstantDataArraySlice &Slice,
3965                                     unsigned ElementSize, uint64_t Offset) {
3966   assert(V);
3967 
3968   // Look through bitcast instructions and geps.
3969   V = V->stripPointerCasts();
3970 
3971   // If the value is a GEP instruction or constant expression, treat it as an
3972   // offset.
3973   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3974     // The GEP operator should be based on a pointer to string constant, and is
3975     // indexing into the string constant.
3976     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3977       return false;
3978 
3979     // If the second index isn't a ConstantInt, then this is a variable index
3980     // into the array.  If this occurs, we can't say anything meaningful about
3981     // the string.
3982     uint64_t StartIdx = 0;
3983     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3984       StartIdx = CI->getZExtValue();
3985     else
3986       return false;
3987     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3988                                     StartIdx + Offset);
3989   }
3990 
3991   // The GEP instruction, constant or instruction, must reference a global
3992   // variable that is a constant and is initialized. The referenced constant
3993   // initializer is the array that we'll use for optimization.
3994   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3995   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3996     return false;
3997 
3998   const ConstantDataArray *Array;
3999   ArrayType *ArrayTy;
4000   if (GV->getInitializer()->isNullValue()) {
4001     Type *GVTy = GV->getValueType();
4002     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
4003       // A zeroinitializer for the array; there is no ConstantDataArray.
4004       Array = nullptr;
4005     } else {
4006       const DataLayout &DL = GV->getParent()->getDataLayout();
4007       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4008       uint64_t Length = SizeInBytes / (ElementSize / 8);
4009       if (Length <= Offset)
4010         return false;
4011 
4012       Slice.Array = nullptr;
4013       Slice.Offset = 0;
4014       Slice.Length = Length - Offset;
4015       return true;
4016     }
4017   } else {
4018     // This must be a ConstantDataArray.
4019     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
4020     if (!Array)
4021       return false;
4022     ArrayTy = Array->getType();
4023   }
4024   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
4025     return false;
4026 
4027   uint64_t NumElts = ArrayTy->getArrayNumElements();
4028   if (Offset > NumElts)
4029     return false;
4030 
4031   Slice.Array = Array;
4032   Slice.Offset = Offset;
4033   Slice.Length = NumElts - Offset;
4034   return true;
4035 }
4036 
4037 /// This function computes the length of a null-terminated C string pointed to
4038 /// by V. If successful, it returns true and returns the string in Str.
4039 /// If unsuccessful, it returns false.
4040 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4041                                  uint64_t Offset, bool TrimAtNul) {
4042   ConstantDataArraySlice Slice;
4043   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4044     return false;
4045 
4046   if (Slice.Array == nullptr) {
4047     if (TrimAtNul) {
4048       Str = StringRef();
4049       return true;
4050     }
4051     if (Slice.Length == 1) {
4052       Str = StringRef("", 1);
4053       return true;
4054     }
4055     // We cannot instantiate a StringRef as we do not have an appropriate string
4056     // of 0s at hand.
4057     return false;
4058   }
4059 
4060   // Start out with the entire array in the StringRef.
4061   Str = Slice.Array->getAsString();
4062   // Skip over 'offset' bytes.
4063   Str = Str.substr(Slice.Offset);
4064 
4065   if (TrimAtNul) {
4066     // Trim off the \0 and anything after it.  If the array is not nul
4067     // terminated, we just return the whole end of string.  The client may know
4068     // some other way that the string is length-bound.
4069     Str = Str.substr(0, Str.find('\0'));
4070   }
4071   return true;
4072 }
4073 
4074 // These next two are very similar to the above, but also look through PHI
4075 // nodes.
4076 // TODO: See if we can integrate these two together.
4077 
4078 /// If we can compute the length of the string pointed to by
4079 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4080 static uint64_t GetStringLengthH(const Value *V,
4081                                  SmallPtrSetImpl<const PHINode*> &PHIs,
4082                                  unsigned CharSize) {
4083   // Look through noop bitcast instructions.
4084   V = V->stripPointerCasts();
4085 
4086   // If this is a PHI node, there are two cases: either we have already seen it
4087   // or we haven't.
4088   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4089     if (!PHIs.insert(PN).second)
4090       return ~0ULL;  // already in the set.
4091 
4092     // If it was new, see if all the input strings are the same length.
4093     uint64_t LenSoFar = ~0ULL;
4094     for (Value *IncValue : PN->incoming_values()) {
4095       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4096       if (Len == 0) return 0; // Unknown length -> unknown.
4097 
4098       if (Len == ~0ULL) continue;
4099 
4100       if (Len != LenSoFar && LenSoFar != ~0ULL)
4101         return 0;    // Disagree -> unknown.
4102       LenSoFar = Len;
4103     }
4104 
4105     // Success, all agree.
4106     return LenSoFar;
4107   }
4108 
4109   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4110   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4111     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4112     if (Len1 == 0) return 0;
4113     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4114     if (Len2 == 0) return 0;
4115     if (Len1 == ~0ULL) return Len2;
4116     if (Len2 == ~0ULL) return Len1;
4117     if (Len1 != Len2) return 0;
4118     return Len1;
4119   }
4120 
4121   // Otherwise, see if we can read the string.
4122   ConstantDataArraySlice Slice;
4123   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4124     return 0;
4125 
4126   if (Slice.Array == nullptr)
4127     return 1;
4128 
4129   // Search for nul characters
4130   unsigned NullIndex = 0;
4131   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4132     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4133       break;
4134   }
4135 
4136   return NullIndex + 1;
4137 }
4138 
4139 /// If we can compute the length of the string pointed to by
4140 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4141 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4142   if (!V->getType()->isPointerTy())
4143     return 0;
4144 
4145   SmallPtrSet<const PHINode*, 32> PHIs;
4146   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4147   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4148   // an empty string as a length.
4149   return Len == ~0ULL ? 1 : Len;
4150 }
4151 
4152 const Value *
4153 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4154                                            bool MustPreserveNullness) {
4155   assert(Call &&
4156          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4157   if (const Value *RV = Call->getReturnedArgOperand())
4158     return RV;
4159   // This can be used only as a aliasing property.
4160   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4161           Call, MustPreserveNullness))
4162     return Call->getArgOperand(0);
4163   return nullptr;
4164 }
4165 
4166 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4167     const CallBase *Call, bool MustPreserveNullness) {
4168   switch (Call->getIntrinsicID()) {
4169   case Intrinsic::launder_invariant_group:
4170   case Intrinsic::strip_invariant_group:
4171   case Intrinsic::aarch64_irg:
4172   case Intrinsic::aarch64_tagp:
4173     return true;
4174   case Intrinsic::ptrmask:
4175     return !MustPreserveNullness;
4176   default:
4177     return false;
4178   }
4179 }
4180 
4181 /// \p PN defines a loop-variant pointer to an object.  Check if the
4182 /// previous iteration of the loop was referring to the same object as \p PN.
4183 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4184                                          const LoopInfo *LI) {
4185   // Find the loop-defined value.
4186   Loop *L = LI->getLoopFor(PN->getParent());
4187   if (PN->getNumIncomingValues() != 2)
4188     return true;
4189 
4190   // Find the value from previous iteration.
4191   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4192   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4193     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4194   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4195     return true;
4196 
4197   // If a new pointer is loaded in the loop, the pointer references a different
4198   // object in every iteration.  E.g.:
4199   //    for (i)
4200   //       int *p = a[i];
4201   //       ...
4202   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4203     if (!L->isLoopInvariant(Load->getPointerOperand()))
4204       return false;
4205   return true;
4206 }
4207 
4208 Value *llvm::getUnderlyingObject(Value *V, unsigned MaxLookup) {
4209   if (!V->getType()->isPointerTy())
4210     return V;
4211   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4212     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4213       V = GEP->getPointerOperand();
4214     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4215                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4216       V = cast<Operator>(V)->getOperand(0);
4217       if (!V->getType()->isPointerTy())
4218         return V;
4219     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
4220       if (GA->isInterposable())
4221         return V;
4222       V = GA->getAliasee();
4223     } else {
4224       if (auto *PHI = dyn_cast<PHINode>(V)) {
4225         // Look through single-arg phi nodes created by LCSSA.
4226         if (PHI->getNumIncomingValues() == 1) {
4227           V = PHI->getIncomingValue(0);
4228           continue;
4229         }
4230       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4231         // CaptureTracking can know about special capturing properties of some
4232         // intrinsics like launder.invariant.group, that can't be expressed with
4233         // the attributes, but have properties like returning aliasing pointer.
4234         // Because some analysis may assume that nocaptured pointer is not
4235         // returned from some special intrinsic (because function would have to
4236         // be marked with returns attribute), it is crucial to use this function
4237         // because it should be in sync with CaptureTracking. Not using it may
4238         // cause weird miscompilations where 2 aliasing pointers are assumed to
4239         // noalias.
4240         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4241           V = RP;
4242           continue;
4243         }
4244       }
4245 
4246       return V;
4247     }
4248     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4249   }
4250   return V;
4251 }
4252 
4253 void llvm::getUnderlyingObjects(const Value *V,
4254                                 SmallVectorImpl<const Value *> &Objects,
4255                                 LoopInfo *LI, unsigned MaxLookup) {
4256   SmallPtrSet<const Value *, 4> Visited;
4257   SmallVector<const Value *, 4> Worklist;
4258   Worklist.push_back(V);
4259   do {
4260     const Value *P = Worklist.pop_back_val();
4261     P = getUnderlyingObject(P, MaxLookup);
4262 
4263     if (!Visited.insert(P).second)
4264       continue;
4265 
4266     if (auto *SI = dyn_cast<SelectInst>(P)) {
4267       Worklist.push_back(SI->getTrueValue());
4268       Worklist.push_back(SI->getFalseValue());
4269       continue;
4270     }
4271 
4272     if (auto *PN = dyn_cast<PHINode>(P)) {
4273       // If this PHI changes the underlying object in every iteration of the
4274       // loop, don't look through it.  Consider:
4275       //   int **A;
4276       //   for (i) {
4277       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4278       //     Curr = A[i];
4279       //     *Prev, *Curr;
4280       //
4281       // Prev is tracking Curr one iteration behind so they refer to different
4282       // underlying objects.
4283       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4284           isSameUnderlyingObjectInLoop(PN, LI))
4285         for (Value *IncValue : PN->incoming_values())
4286           Worklist.push_back(IncValue);
4287       continue;
4288     }
4289 
4290     Objects.push_back(P);
4291   } while (!Worklist.empty());
4292 }
4293 
4294 /// This is the function that does the work of looking through basic
4295 /// ptrtoint+arithmetic+inttoptr sequences.
4296 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4297   do {
4298     if (const Operator *U = dyn_cast<Operator>(V)) {
4299       // If we find a ptrtoint, we can transfer control back to the
4300       // regular getUnderlyingObjectFromInt.
4301       if (U->getOpcode() == Instruction::PtrToInt)
4302         return U->getOperand(0);
4303       // If we find an add of a constant, a multiplied value, or a phi, it's
4304       // likely that the other operand will lead us to the base
4305       // object. We don't have to worry about the case where the
4306       // object address is somehow being computed by the multiply,
4307       // because our callers only care when the result is an
4308       // identifiable object.
4309       if (U->getOpcode() != Instruction::Add ||
4310           (!isa<ConstantInt>(U->getOperand(1)) &&
4311            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4312            !isa<PHINode>(U->getOperand(1))))
4313         return V;
4314       V = U->getOperand(0);
4315     } else {
4316       return V;
4317     }
4318     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4319   } while (true);
4320 }
4321 
4322 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4323 /// ptrtoint+arithmetic+inttoptr sequences.
4324 /// It returns false if unidentified object is found in getUnderlyingObjects.
4325 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4326                                           SmallVectorImpl<Value *> &Objects) {
4327   SmallPtrSet<const Value *, 16> Visited;
4328   SmallVector<const Value *, 4> Working(1, V);
4329   do {
4330     V = Working.pop_back_val();
4331 
4332     SmallVector<const Value *, 4> Objs;
4333     getUnderlyingObjects(V, Objs);
4334 
4335     for (const Value *V : Objs) {
4336       if (!Visited.insert(V).second)
4337         continue;
4338       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4339         const Value *O =
4340           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4341         if (O->getType()->isPointerTy()) {
4342           Working.push_back(O);
4343           continue;
4344         }
4345       }
4346       // If getUnderlyingObjects fails to find an identifiable object,
4347       // getUnderlyingObjectsForCodeGen also fails for safety.
4348       if (!isIdentifiedObject(V)) {
4349         Objects.clear();
4350         return false;
4351       }
4352       Objects.push_back(const_cast<Value *>(V));
4353     }
4354   } while (!Working.empty());
4355   return true;
4356 }
4357 
4358 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4359   AllocaInst *Result = nullptr;
4360   SmallPtrSet<Value *, 4> Visited;
4361   SmallVector<Value *, 4> Worklist;
4362 
4363   auto AddWork = [&](Value *V) {
4364     if (Visited.insert(V).second)
4365       Worklist.push_back(V);
4366   };
4367 
4368   AddWork(V);
4369   do {
4370     V = Worklist.pop_back_val();
4371     assert(Visited.count(V));
4372 
4373     if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4374       if (Result && Result != AI)
4375         return nullptr;
4376       Result = AI;
4377     } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4378       AddWork(CI->getOperand(0));
4379     } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4380       for (Value *IncValue : PN->incoming_values())
4381         AddWork(IncValue);
4382     } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4383       AddWork(SI->getTrueValue());
4384       AddWork(SI->getFalseValue());
4385     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4386       if (OffsetZero && !GEP->hasAllZeroIndices())
4387         return nullptr;
4388       AddWork(GEP->getPointerOperand());
4389     } else {
4390       return nullptr;
4391     }
4392   } while (!Worklist.empty());
4393 
4394   return Result;
4395 }
4396 
4397 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4398     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4399   for (const User *U : V->users()) {
4400     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4401     if (!II)
4402       return false;
4403 
4404     if (AllowLifetime && II->isLifetimeStartOrEnd())
4405       continue;
4406 
4407     if (AllowDroppable && II->isDroppable())
4408       continue;
4409 
4410     return false;
4411   }
4412   return true;
4413 }
4414 
4415 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4416   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4417       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4418 }
4419 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4420   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4421       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4422 }
4423 
4424 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4425   if (!LI.isUnordered())
4426     return true;
4427   const Function &F = *LI.getFunction();
4428   // Speculative load may create a race that did not exist in the source.
4429   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4430     // Speculative load may load data from dirty regions.
4431     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4432     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4433 }
4434 
4435 
4436 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4437                                         const Instruction *CtxI,
4438                                         const DominatorTree *DT) {
4439   const Operator *Inst = dyn_cast<Operator>(V);
4440   if (!Inst)
4441     return false;
4442 
4443   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4444     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4445       if (C->canTrap())
4446         return false;
4447 
4448   switch (Inst->getOpcode()) {
4449   default:
4450     return true;
4451   case Instruction::UDiv:
4452   case Instruction::URem: {
4453     // x / y is undefined if y == 0.
4454     const APInt *V;
4455     if (match(Inst->getOperand(1), m_APInt(V)))
4456       return *V != 0;
4457     return false;
4458   }
4459   case Instruction::SDiv:
4460   case Instruction::SRem: {
4461     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4462     const APInt *Numerator, *Denominator;
4463     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4464       return false;
4465     // We cannot hoist this division if the denominator is 0.
4466     if (*Denominator == 0)
4467       return false;
4468     // It's safe to hoist if the denominator is not 0 or -1.
4469     if (*Denominator != -1)
4470       return true;
4471     // At this point we know that the denominator is -1.  It is safe to hoist as
4472     // long we know that the numerator is not INT_MIN.
4473     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4474       return !Numerator->isMinSignedValue();
4475     // The numerator *might* be MinSignedValue.
4476     return false;
4477   }
4478   case Instruction::Load: {
4479     const LoadInst *LI = cast<LoadInst>(Inst);
4480     if (mustSuppressSpeculation(*LI))
4481       return false;
4482     const DataLayout &DL = LI->getModule()->getDataLayout();
4483     return isDereferenceableAndAlignedPointer(
4484         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4485         DL, CtxI, DT);
4486   }
4487   case Instruction::Call: {
4488     auto *CI = cast<const CallInst>(Inst);
4489     const Function *Callee = CI->getCalledFunction();
4490 
4491     // The called function could have undefined behavior or side-effects, even
4492     // if marked readnone nounwind.
4493     return Callee && Callee->isSpeculatable();
4494   }
4495   case Instruction::VAArg:
4496   case Instruction::Alloca:
4497   case Instruction::Invoke:
4498   case Instruction::CallBr:
4499   case Instruction::PHI:
4500   case Instruction::Store:
4501   case Instruction::Ret:
4502   case Instruction::Br:
4503   case Instruction::IndirectBr:
4504   case Instruction::Switch:
4505   case Instruction::Unreachable:
4506   case Instruction::Fence:
4507   case Instruction::AtomicRMW:
4508   case Instruction::AtomicCmpXchg:
4509   case Instruction::LandingPad:
4510   case Instruction::Resume:
4511   case Instruction::CatchSwitch:
4512   case Instruction::CatchPad:
4513   case Instruction::CatchRet:
4514   case Instruction::CleanupPad:
4515   case Instruction::CleanupRet:
4516     return false; // Misc instructions which have effects
4517   }
4518 }
4519 
4520 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4521   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4522 }
4523 
4524 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4525 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4526   switch (OR) {
4527     case ConstantRange::OverflowResult::MayOverflow:
4528       return OverflowResult::MayOverflow;
4529     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4530       return OverflowResult::AlwaysOverflowsLow;
4531     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4532       return OverflowResult::AlwaysOverflowsHigh;
4533     case ConstantRange::OverflowResult::NeverOverflows:
4534       return OverflowResult::NeverOverflows;
4535   }
4536   llvm_unreachable("Unknown OverflowResult");
4537 }
4538 
4539 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4540 static ConstantRange computeConstantRangeIncludingKnownBits(
4541     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4542     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4543     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4544   KnownBits Known = computeKnownBits(
4545       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4546   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4547   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4548   ConstantRange::PreferredRangeType RangeType =
4549       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4550   return CR1.intersectWith(CR2, RangeType);
4551 }
4552 
4553 OverflowResult llvm::computeOverflowForUnsignedMul(
4554     const Value *LHS, const Value *RHS, const DataLayout &DL,
4555     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4556     bool UseInstrInfo) {
4557   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4558                                         nullptr, UseInstrInfo);
4559   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4560                                         nullptr, UseInstrInfo);
4561   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4562   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4563   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4564 }
4565 
4566 OverflowResult
4567 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4568                                   const DataLayout &DL, AssumptionCache *AC,
4569                                   const Instruction *CxtI,
4570                                   const DominatorTree *DT, bool UseInstrInfo) {
4571   // Multiplying n * m significant bits yields a result of n + m significant
4572   // bits. If the total number of significant bits does not exceed the
4573   // result bit width (minus 1), there is no overflow.
4574   // This means if we have enough leading sign bits in the operands
4575   // we can guarantee that the result does not overflow.
4576   // Ref: "Hacker's Delight" by Henry Warren
4577   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4578 
4579   // Note that underestimating the number of sign bits gives a more
4580   // conservative answer.
4581   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4582                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4583 
4584   // First handle the easy case: if we have enough sign bits there's
4585   // definitely no overflow.
4586   if (SignBits > BitWidth + 1)
4587     return OverflowResult::NeverOverflows;
4588 
4589   // There are two ambiguous cases where there can be no overflow:
4590   //   SignBits == BitWidth + 1    and
4591   //   SignBits == BitWidth
4592   // The second case is difficult to check, therefore we only handle the
4593   // first case.
4594   if (SignBits == BitWidth + 1) {
4595     // It overflows only when both arguments are negative and the true
4596     // product is exactly the minimum negative number.
4597     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4598     // For simplicity we just check if at least one side is not negative.
4599     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4600                                           nullptr, UseInstrInfo);
4601     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4602                                           nullptr, UseInstrInfo);
4603     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4604       return OverflowResult::NeverOverflows;
4605   }
4606   return OverflowResult::MayOverflow;
4607 }
4608 
4609 OverflowResult llvm::computeOverflowForUnsignedAdd(
4610     const Value *LHS, const Value *RHS, const DataLayout &DL,
4611     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4612     bool UseInstrInfo) {
4613   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4614       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4615       nullptr, UseInstrInfo);
4616   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4617       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4618       nullptr, UseInstrInfo);
4619   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4620 }
4621 
4622 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4623                                                   const Value *RHS,
4624                                                   const AddOperator *Add,
4625                                                   const DataLayout &DL,
4626                                                   AssumptionCache *AC,
4627                                                   const Instruction *CxtI,
4628                                                   const DominatorTree *DT) {
4629   if (Add && Add->hasNoSignedWrap()) {
4630     return OverflowResult::NeverOverflows;
4631   }
4632 
4633   // If LHS and RHS each have at least two sign bits, the addition will look
4634   // like
4635   //
4636   // XX..... +
4637   // YY.....
4638   //
4639   // If the carry into the most significant position is 0, X and Y can't both
4640   // be 1 and therefore the carry out of the addition is also 0.
4641   //
4642   // If the carry into the most significant position is 1, X and Y can't both
4643   // be 0 and therefore the carry out of the addition is also 1.
4644   //
4645   // Since the carry into the most significant position is always equal to
4646   // the carry out of the addition, there is no signed overflow.
4647   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4648       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4649     return OverflowResult::NeverOverflows;
4650 
4651   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4652       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4653   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4654       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4655   OverflowResult OR =
4656       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4657   if (OR != OverflowResult::MayOverflow)
4658     return OR;
4659 
4660   // The remaining code needs Add to be available. Early returns if not so.
4661   if (!Add)
4662     return OverflowResult::MayOverflow;
4663 
4664   // If the sign of Add is the same as at least one of the operands, this add
4665   // CANNOT overflow. If this can be determined from the known bits of the
4666   // operands the above signedAddMayOverflow() check will have already done so.
4667   // The only other way to improve on the known bits is from an assumption, so
4668   // call computeKnownBitsFromAssume() directly.
4669   bool LHSOrRHSKnownNonNegative =
4670       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4671   bool LHSOrRHSKnownNegative =
4672       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4673   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4674     KnownBits AddKnown(LHSRange.getBitWidth());
4675     computeKnownBitsFromAssume(
4676         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4677     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4678         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4679       return OverflowResult::NeverOverflows;
4680   }
4681 
4682   return OverflowResult::MayOverflow;
4683 }
4684 
4685 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4686                                                    const Value *RHS,
4687                                                    const DataLayout &DL,
4688                                                    AssumptionCache *AC,
4689                                                    const Instruction *CxtI,
4690                                                    const DominatorTree *DT) {
4691   // Checking for conditions implied by dominating conditions may be expensive.
4692   // Limit it to usub_with_overflow calls for now.
4693   if (match(CxtI,
4694             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4695     if (auto C =
4696             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4697       if (*C)
4698         return OverflowResult::NeverOverflows;
4699       return OverflowResult::AlwaysOverflowsLow;
4700     }
4701   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4702       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4703   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4704       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4705   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4706 }
4707 
4708 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4709                                                  const Value *RHS,
4710                                                  const DataLayout &DL,
4711                                                  AssumptionCache *AC,
4712                                                  const Instruction *CxtI,
4713                                                  const DominatorTree *DT) {
4714   // If LHS and RHS each have at least two sign bits, the subtraction
4715   // cannot overflow.
4716   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4717       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4718     return OverflowResult::NeverOverflows;
4719 
4720   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4721       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4722   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4723       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4724   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4725 }
4726 
4727 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4728                                      const DominatorTree &DT) {
4729   SmallVector<const BranchInst *, 2> GuardingBranches;
4730   SmallVector<const ExtractValueInst *, 2> Results;
4731 
4732   for (const User *U : WO->users()) {
4733     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4734       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4735 
4736       if (EVI->getIndices()[0] == 0)
4737         Results.push_back(EVI);
4738       else {
4739         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4740 
4741         for (const auto *U : EVI->users())
4742           if (const auto *B = dyn_cast<BranchInst>(U)) {
4743             assert(B->isConditional() && "How else is it using an i1?");
4744             GuardingBranches.push_back(B);
4745           }
4746       }
4747     } else {
4748       // We are using the aggregate directly in a way we don't want to analyze
4749       // here (storing it to a global, say).
4750       return false;
4751     }
4752   }
4753 
4754   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4755     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4756     if (!NoWrapEdge.isSingleEdge())
4757       return false;
4758 
4759     // Check if all users of the add are provably no-wrap.
4760     for (const auto *Result : Results) {
4761       // If the extractvalue itself is not executed on overflow, the we don't
4762       // need to check each use separately, since domination is transitive.
4763       if (DT.dominates(NoWrapEdge, Result->getParent()))
4764         continue;
4765 
4766       for (auto &RU : Result->uses())
4767         if (!DT.dominates(NoWrapEdge, RU))
4768           return false;
4769     }
4770 
4771     return true;
4772   };
4773 
4774   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4775 }
4776 
4777 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4778   // See whether I has flags that may create poison
4779   if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4780     if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4781       return true;
4782   }
4783   if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4784     if (ExactOp->isExact())
4785       return true;
4786   if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4787     auto FMF = FP->getFastMathFlags();
4788     if (FMF.noNaNs() || FMF.noInfs())
4789       return true;
4790   }
4791 
4792   unsigned Opcode = Op->getOpcode();
4793 
4794   // Check whether opcode is a poison/undef-generating operation
4795   switch (Opcode) {
4796   case Instruction::Shl:
4797   case Instruction::AShr:
4798   case Instruction::LShr: {
4799     // Shifts return poison if shiftwidth is larger than the bitwidth.
4800     if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4801       SmallVector<Constant *, 4> ShiftAmounts;
4802       if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4803         unsigned NumElts = FVTy->getNumElements();
4804         for (unsigned i = 0; i < NumElts; ++i)
4805           ShiftAmounts.push_back(C->getAggregateElement(i));
4806       } else if (isa<ScalableVectorType>(C->getType()))
4807         return true; // Can't tell, just return true to be safe
4808       else
4809         ShiftAmounts.push_back(C);
4810 
4811       bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4812         auto *CI = dyn_cast<ConstantInt>(C);
4813         return CI && CI->getZExtValue() < C->getType()->getIntegerBitWidth();
4814       });
4815       return !Safe;
4816     }
4817     return true;
4818   }
4819   case Instruction::FPToSI:
4820   case Instruction::FPToUI:
4821     // fptosi/ui yields poison if the resulting value does not fit in the
4822     // destination type.
4823     return true;
4824   case Instruction::Call:
4825   case Instruction::CallBr:
4826   case Instruction::Invoke: {
4827     const auto *CB = cast<CallBase>(Op);
4828     return !CB->hasRetAttr(Attribute::NoUndef);
4829   }
4830   case Instruction::InsertElement:
4831   case Instruction::ExtractElement: {
4832     // If index exceeds the length of the vector, it returns poison
4833     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4834     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4835     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4836     if (!Idx ||
4837         Idx->getZExtValue() >= VTy->getElementCount().getKnownMinValue())
4838       return true;
4839     return false;
4840   }
4841   case Instruction::ShuffleVector: {
4842     // shufflevector may return undef.
4843     if (PoisonOnly)
4844       return false;
4845     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4846                              ? cast<ConstantExpr>(Op)->getShuffleMask()
4847                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4848     return any_of(Mask, [](int Elt) { return Elt == UndefMaskElem; });
4849   }
4850   case Instruction::FNeg:
4851   case Instruction::PHI:
4852   case Instruction::Select:
4853   case Instruction::URem:
4854   case Instruction::SRem:
4855   case Instruction::ExtractValue:
4856   case Instruction::InsertValue:
4857   case Instruction::Freeze:
4858   case Instruction::ICmp:
4859   case Instruction::FCmp:
4860     return false;
4861   case Instruction::GetElementPtr: {
4862     const auto *GEP = cast<GEPOperator>(Op);
4863     return GEP->isInBounds();
4864   }
4865   default: {
4866     const auto *CE = dyn_cast<ConstantExpr>(Op);
4867     if (isa<CastInst>(Op) || (CE && CE->isCast()))
4868       return false;
4869     else if (Instruction::isBinaryOp(Opcode))
4870       return false;
4871     // Be conservative and return true.
4872     return true;
4873   }
4874   }
4875 }
4876 
4877 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
4878   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
4879 }
4880 
4881 bool llvm::canCreatePoison(const Operator *Op) {
4882   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
4883 }
4884 
4885 static bool programUndefinedIfUndefOrPoison(const Value *V,
4886                                             bool PoisonOnly);
4887 
4888 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
4889                                              const Instruction *CtxI,
4890                                              const DominatorTree *DT,
4891                                              unsigned Depth, bool PoisonOnly) {
4892   if (Depth >= MaxAnalysisRecursionDepth)
4893     return false;
4894 
4895   if (isa<MetadataAsValue>(V))
4896     return false;
4897 
4898   if (const auto *A = dyn_cast<Argument>(V)) {
4899     if (A->hasAttribute(Attribute::NoUndef))
4900       return true;
4901   }
4902 
4903   if (auto *C = dyn_cast<Constant>(V)) {
4904     if (isa<UndefValue>(C))
4905       return PoisonOnly;
4906 
4907     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
4908         isa<ConstantPointerNull>(C) || isa<Function>(C))
4909       return true;
4910 
4911     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
4912       return (PoisonOnly || !C->containsUndefElement()) &&
4913              !C->containsConstantExpression();
4914   }
4915 
4916   // Strip cast operations from a pointer value.
4917   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
4918   // inbounds with zero offset. To guarantee that the result isn't poison, the
4919   // stripped pointer is checked as it has to be pointing into an allocated
4920   // object or be null `null` to ensure `inbounds` getelement pointers with a
4921   // zero offset could not produce poison.
4922   // It can strip off addrspacecast that do not change bit representation as
4923   // well. We believe that such addrspacecast is equivalent to no-op.
4924   auto *StrippedV = V->stripPointerCastsSameRepresentation();
4925   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
4926       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
4927     return true;
4928 
4929   auto OpCheck = [&](const Value *V) {
4930     return isGuaranteedNotToBeUndefOrPoison(V, CtxI, DT, Depth + 1, PoisonOnly);
4931   };
4932 
4933   if (auto *Opr = dyn_cast<Operator>(V)) {
4934     // If the value is a freeze instruction, then it can never
4935     // be undef or poison.
4936     if (isa<FreezeInst>(V))
4937       return true;
4938 
4939     if (const auto *CB = dyn_cast<CallBase>(V)) {
4940       if (CB->hasRetAttr(Attribute::NoUndef))
4941         return true;
4942     }
4943 
4944     if (const auto *PN = dyn_cast<PHINode>(V)) {
4945       unsigned Num = PN->getNumIncomingValues();
4946       bool IsWellDefined = true;
4947       for (unsigned i = 0; i < Num; ++i) {
4948         auto *TI = PN->getIncomingBlock(i)->getTerminator();
4949         if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), TI, DT,
4950                                               Depth + 1, PoisonOnly)) {
4951           IsWellDefined = false;
4952           break;
4953         }
4954       }
4955       if (IsWellDefined)
4956         return true;
4957     } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
4958       return true;
4959   }
4960 
4961   if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
4962     return true;
4963 
4964   // CxtI may be null or a cloned instruction.
4965   if (!CtxI || !CtxI->getParent() || !DT)
4966     return false;
4967 
4968   auto *DNode = DT->getNode(CtxI->getParent());
4969   if (!DNode)
4970     // Unreachable block
4971     return false;
4972 
4973   // If V is used as a branch condition before reaching CtxI, V cannot be
4974   // undef or poison.
4975   //   br V, BB1, BB2
4976   // BB1:
4977   //   CtxI ; V cannot be undef or poison here
4978   auto *Dominator = DNode->getIDom();
4979   while (Dominator) {
4980     auto *TI = Dominator->getBlock()->getTerminator();
4981 
4982     Value *Cond = nullptr;
4983     if (auto BI = dyn_cast<BranchInst>(TI)) {
4984       if (BI->isConditional())
4985         Cond = BI->getCondition();
4986     } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
4987       Cond = SI->getCondition();
4988     }
4989 
4990     if (Cond) {
4991       if (Cond == V)
4992         return true;
4993       else if (PoisonOnly && isa<Operator>(Cond)) {
4994         // For poison, we can analyze further
4995         auto *Opr = cast<Operator>(Cond);
4996         if (propagatesPoison(Opr) &&
4997             any_of(Opr->operand_values(), [&](Value *Op) { return Op == V; }))
4998           return true;
4999       }
5000     }
5001 
5002     Dominator = Dominator->getIDom();
5003   }
5004 
5005   return false;
5006 }
5007 
5008 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V,
5009                                             const Instruction *CtxI,
5010                                             const DominatorTree *DT,
5011                                             unsigned Depth) {
5012   return ::isGuaranteedNotToBeUndefOrPoison(V, CtxI, DT, Depth, false);
5013 }
5014 
5015 bool llvm::isGuaranteedNotToBePoison(const Value *V, const Instruction *CtxI,
5016                                      const DominatorTree *DT, unsigned Depth) {
5017   return ::isGuaranteedNotToBeUndefOrPoison(V, CtxI, DT, Depth, true);
5018 }
5019 
5020 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5021                                                  const DataLayout &DL,
5022                                                  AssumptionCache *AC,
5023                                                  const Instruction *CxtI,
5024                                                  const DominatorTree *DT) {
5025   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5026                                        Add, DL, AC, CxtI, DT);
5027 }
5028 
5029 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5030                                                  const Value *RHS,
5031                                                  const DataLayout &DL,
5032                                                  AssumptionCache *AC,
5033                                                  const Instruction *CxtI,
5034                                                  const DominatorTree *DT) {
5035   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5036 }
5037 
5038 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5039   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5040   // of time because it's possible for another thread to interfere with it for an
5041   // arbitrary length of time, but programs aren't allowed to rely on that.
5042 
5043   // If there is no successor, then execution can't transfer to it.
5044   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
5045     return !CRI->unwindsToCaller();
5046   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
5047     return !CatchSwitch->unwindsToCaller();
5048   if (isa<ResumeInst>(I))
5049     return false;
5050   if (isa<ReturnInst>(I))
5051     return false;
5052   if (isa<UnreachableInst>(I))
5053     return false;
5054 
5055   // Calls can throw, or contain an infinite loop, or kill the process.
5056   if (const auto *CB = dyn_cast<CallBase>(I)) {
5057     // Call sites that throw have implicit non-local control flow.
5058     if (!CB->doesNotThrow())
5059       return false;
5060 
5061     // A function which doens't throw and has "willreturn" attribute will
5062     // always return.
5063     if (CB->hasFnAttr(Attribute::WillReturn))
5064       return true;
5065 
5066     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
5067     // etc. and thus not return.  However, LLVM already assumes that
5068     //
5069     //  - Thread exiting actions are modeled as writes to memory invisible to
5070     //    the program.
5071     //
5072     //  - Loops that don't have side effects (side effects are volatile/atomic
5073     //    stores and IO) always terminate (see http://llvm.org/PR965).
5074     //    Furthermore IO itself is also modeled as writes to memory invisible to
5075     //    the program.
5076     //
5077     // We rely on those assumptions here, and use the memory effects of the call
5078     // target as a proxy for checking that it always returns.
5079 
5080     // FIXME: This isn't aggressive enough; a call which only writes to a global
5081     // is guaranteed to return.
5082     return CB->onlyReadsMemory() || CB->onlyAccessesArgMemory();
5083   }
5084 
5085   // Other instructions return normally.
5086   return true;
5087 }
5088 
5089 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5090   // TODO: This is slightly conservative for invoke instruction since exiting
5091   // via an exception *is* normal control for them.
5092   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
5093     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
5094       return false;
5095   return true;
5096 }
5097 
5098 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5099                                                   const Loop *L) {
5100   // The loop header is guaranteed to be executed for every iteration.
5101   //
5102   // FIXME: Relax this constraint to cover all basic blocks that are
5103   // guaranteed to be executed at every iteration.
5104   if (I->getParent() != L->getHeader()) return false;
5105 
5106   for (const Instruction &LI : *L->getHeader()) {
5107     if (&LI == I) return true;
5108     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5109   }
5110   llvm_unreachable("Instruction not contained in its own parent basic block.");
5111 }
5112 
5113 bool llvm::propagatesPoison(const Operator *I) {
5114   switch (I->getOpcode()) {
5115   case Instruction::Freeze:
5116   case Instruction::Select:
5117   case Instruction::PHI:
5118   case Instruction::Call:
5119   case Instruction::Invoke:
5120     return false;
5121   case Instruction::ICmp:
5122   case Instruction::FCmp:
5123   case Instruction::GetElementPtr:
5124     return true;
5125   default:
5126     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5127       return true;
5128 
5129     // Be conservative and return false.
5130     return false;
5131   }
5132 }
5133 
5134 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5135                                      SmallPtrSetImpl<const Value *> &Operands) {
5136   switch (I->getOpcode()) {
5137     case Instruction::Store:
5138       Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5139       break;
5140 
5141     case Instruction::Load:
5142       Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5143       break;
5144 
5145     case Instruction::AtomicCmpXchg:
5146       Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5147       break;
5148 
5149     case Instruction::AtomicRMW:
5150       Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5151       break;
5152 
5153     case Instruction::UDiv:
5154     case Instruction::SDiv:
5155     case Instruction::URem:
5156     case Instruction::SRem:
5157       Operands.insert(I->getOperand(1));
5158       break;
5159 
5160     case Instruction::Call:
5161     case Instruction::Invoke: {
5162       const CallBase *CB = cast<CallBase>(I);
5163       if (CB->isIndirectCall())
5164         Operands.insert(CB->getCalledOperand());
5165       for (unsigned i = 0; i < CB->arg_size(); ++i) {
5166         if (CB->paramHasAttr(i, Attribute::NoUndef))
5167           Operands.insert(CB->getArgOperand(i));
5168       }
5169       break;
5170     }
5171 
5172     default:
5173       break;
5174   }
5175 }
5176 
5177 bool llvm::mustTriggerUB(const Instruction *I,
5178                          const SmallSet<const Value *, 16>& KnownPoison) {
5179   SmallPtrSet<const Value *, 4> NonPoisonOps;
5180   getGuaranteedNonPoisonOps(I, NonPoisonOps);
5181 
5182   for (const auto *V : NonPoisonOps)
5183     if (KnownPoison.count(V))
5184       return true;
5185 
5186   return false;
5187 }
5188 
5189 static bool programUndefinedIfUndefOrPoison(const Value *V,
5190                                             bool PoisonOnly) {
5191   // We currently only look for uses of values within the same basic
5192   // block, as that makes it easier to guarantee that the uses will be
5193   // executed given that Inst is executed.
5194   //
5195   // FIXME: Expand this to consider uses beyond the same basic block. To do
5196   // this, look out for the distinction between post-dominance and strong
5197   // post-dominance.
5198   const BasicBlock *BB = nullptr;
5199   BasicBlock::const_iterator Begin;
5200   if (const auto *Inst = dyn_cast<Instruction>(V)) {
5201     BB = Inst->getParent();
5202     Begin = Inst->getIterator();
5203     Begin++;
5204   } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5205     BB = &Arg->getParent()->getEntryBlock();
5206     Begin = BB->begin();
5207   } else {
5208     return false;
5209   }
5210 
5211   BasicBlock::const_iterator End = BB->end();
5212 
5213   if (!PoisonOnly) {
5214     // Be conservative & just check whether a value is passed to a noundef
5215     // argument.
5216     // Instructions that raise UB with a poison operand are well-defined
5217     // or have unclear semantics when the input is partially undef.
5218     // For example, 'udiv x, (undef | 1)' isn't UB.
5219 
5220     for (auto &I : make_range(Begin, End)) {
5221       if (const auto *CB = dyn_cast<CallBase>(&I)) {
5222         for (unsigned i = 0; i < CB->arg_size(); ++i) {
5223           if (CB->paramHasAttr(i, Attribute::NoUndef) &&
5224               CB->getArgOperand(i) == V)
5225             return true;
5226         }
5227       }
5228       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5229         break;
5230     }
5231     return false;
5232   }
5233 
5234   // Set of instructions that we have proved will yield poison if Inst
5235   // does.
5236   SmallSet<const Value *, 16> YieldsPoison;
5237   SmallSet<const BasicBlock *, 4> Visited;
5238 
5239   YieldsPoison.insert(V);
5240   auto Propagate = [&](const User *User) {
5241     if (propagatesPoison(cast<Operator>(User)))
5242       YieldsPoison.insert(User);
5243   };
5244   for_each(V->users(), Propagate);
5245   Visited.insert(BB);
5246 
5247   unsigned Iter = 0;
5248   while (Iter++ < MaxAnalysisRecursionDepth) {
5249     for (auto &I : make_range(Begin, End)) {
5250       if (mustTriggerUB(&I, YieldsPoison))
5251         return true;
5252       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5253         return false;
5254 
5255       // Mark poison that propagates from I through uses of I.
5256       if (YieldsPoison.count(&I))
5257         for_each(I.users(), Propagate);
5258     }
5259 
5260     if (auto *NextBB = BB->getSingleSuccessor()) {
5261       if (Visited.insert(NextBB).second) {
5262         BB = NextBB;
5263         Begin = BB->getFirstNonPHI()->getIterator();
5264         End = BB->end();
5265         continue;
5266       }
5267     }
5268 
5269     break;
5270   }
5271   return false;
5272 }
5273 
5274 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5275   return ::programUndefinedIfUndefOrPoison(Inst, false);
5276 }
5277 
5278 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5279   return ::programUndefinedIfUndefOrPoison(Inst, true);
5280 }
5281 
5282 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5283   if (FMF.noNaNs())
5284     return true;
5285 
5286   if (auto *C = dyn_cast<ConstantFP>(V))
5287     return !C->isNaN();
5288 
5289   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5290     if (!C->getElementType()->isFloatingPointTy())
5291       return false;
5292     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5293       if (C->getElementAsAPFloat(I).isNaN())
5294         return false;
5295     }
5296     return true;
5297   }
5298 
5299   if (isa<ConstantAggregateZero>(V))
5300     return true;
5301 
5302   return false;
5303 }
5304 
5305 static bool isKnownNonZero(const Value *V) {
5306   if (auto *C = dyn_cast<ConstantFP>(V))
5307     return !C->isZero();
5308 
5309   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5310     if (!C->getElementType()->isFloatingPointTy())
5311       return false;
5312     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5313       if (C->getElementAsAPFloat(I).isZero())
5314         return false;
5315     }
5316     return true;
5317   }
5318 
5319   return false;
5320 }
5321 
5322 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5323 /// Given non-min/max outer cmp/select from the clamp pattern this
5324 /// function recognizes if it can be substitued by a "canonical" min/max
5325 /// pattern.
5326 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5327                                                Value *CmpLHS, Value *CmpRHS,
5328                                                Value *TrueVal, Value *FalseVal,
5329                                                Value *&LHS, Value *&RHS) {
5330   // Try to match
5331   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5332   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5333   // and return description of the outer Max/Min.
5334 
5335   // First, check if select has inverse order:
5336   if (CmpRHS == FalseVal) {
5337     std::swap(TrueVal, FalseVal);
5338     Pred = CmpInst::getInversePredicate(Pred);
5339   }
5340 
5341   // Assume success now. If there's no match, callers should not use these anyway.
5342   LHS = TrueVal;
5343   RHS = FalseVal;
5344 
5345   const APFloat *FC1;
5346   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5347     return {SPF_UNKNOWN, SPNB_NA, false};
5348 
5349   const APFloat *FC2;
5350   switch (Pred) {
5351   case CmpInst::FCMP_OLT:
5352   case CmpInst::FCMP_OLE:
5353   case CmpInst::FCMP_ULT:
5354   case CmpInst::FCMP_ULE:
5355     if (match(FalseVal,
5356               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5357                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5358         *FC1 < *FC2)
5359       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5360     break;
5361   case CmpInst::FCMP_OGT:
5362   case CmpInst::FCMP_OGE:
5363   case CmpInst::FCMP_UGT:
5364   case CmpInst::FCMP_UGE:
5365     if (match(FalseVal,
5366               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5367                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5368         *FC1 > *FC2)
5369       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5370     break;
5371   default:
5372     break;
5373   }
5374 
5375   return {SPF_UNKNOWN, SPNB_NA, false};
5376 }
5377 
5378 /// Recognize variations of:
5379 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5380 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5381                                       Value *CmpLHS, Value *CmpRHS,
5382                                       Value *TrueVal, Value *FalseVal) {
5383   // Swap the select operands and predicate to match the patterns below.
5384   if (CmpRHS != TrueVal) {
5385     Pred = ICmpInst::getSwappedPredicate(Pred);
5386     std::swap(TrueVal, FalseVal);
5387   }
5388   const APInt *C1;
5389   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5390     const APInt *C2;
5391     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5392     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5393         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5394       return {SPF_SMAX, SPNB_NA, false};
5395 
5396     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5397     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5398         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5399       return {SPF_SMIN, SPNB_NA, false};
5400 
5401     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5402     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5403         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5404       return {SPF_UMAX, SPNB_NA, false};
5405 
5406     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5407     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5408         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5409       return {SPF_UMIN, SPNB_NA, false};
5410   }
5411   return {SPF_UNKNOWN, SPNB_NA, false};
5412 }
5413 
5414 /// Recognize variations of:
5415 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5416 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5417                                                Value *CmpLHS, Value *CmpRHS,
5418                                                Value *TVal, Value *FVal,
5419                                                unsigned Depth) {
5420   // TODO: Allow FP min/max with nnan/nsz.
5421   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5422 
5423   Value *A = nullptr, *B = nullptr;
5424   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5425   if (!SelectPatternResult::isMinOrMax(L.Flavor))
5426     return {SPF_UNKNOWN, SPNB_NA, false};
5427 
5428   Value *C = nullptr, *D = nullptr;
5429   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5430   if (L.Flavor != R.Flavor)
5431     return {SPF_UNKNOWN, SPNB_NA, false};
5432 
5433   // We have something like: x Pred y ? min(a, b) : min(c, d).
5434   // Try to match the compare to the min/max operations of the select operands.
5435   // First, make sure we have the right compare predicate.
5436   switch (L.Flavor) {
5437   case SPF_SMIN:
5438     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5439       Pred = ICmpInst::getSwappedPredicate(Pred);
5440       std::swap(CmpLHS, CmpRHS);
5441     }
5442     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5443       break;
5444     return {SPF_UNKNOWN, SPNB_NA, false};
5445   case SPF_SMAX:
5446     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5447       Pred = ICmpInst::getSwappedPredicate(Pred);
5448       std::swap(CmpLHS, CmpRHS);
5449     }
5450     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5451       break;
5452     return {SPF_UNKNOWN, SPNB_NA, false};
5453   case SPF_UMIN:
5454     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5455       Pred = ICmpInst::getSwappedPredicate(Pred);
5456       std::swap(CmpLHS, CmpRHS);
5457     }
5458     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5459       break;
5460     return {SPF_UNKNOWN, SPNB_NA, false};
5461   case SPF_UMAX:
5462     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5463       Pred = ICmpInst::getSwappedPredicate(Pred);
5464       std::swap(CmpLHS, CmpRHS);
5465     }
5466     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5467       break;
5468     return {SPF_UNKNOWN, SPNB_NA, false};
5469   default:
5470     return {SPF_UNKNOWN, SPNB_NA, false};
5471   }
5472 
5473   // If there is a common operand in the already matched min/max and the other
5474   // min/max operands match the compare operands (either directly or inverted),
5475   // then this is min/max of the same flavor.
5476 
5477   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5478   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5479   if (D == B) {
5480     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5481                                          match(A, m_Not(m_Specific(CmpRHS)))))
5482       return {L.Flavor, SPNB_NA, false};
5483   }
5484   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5485   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5486   if (C == B) {
5487     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5488                                          match(A, m_Not(m_Specific(CmpRHS)))))
5489       return {L.Flavor, SPNB_NA, false};
5490   }
5491   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5492   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5493   if (D == A) {
5494     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5495                                          match(B, m_Not(m_Specific(CmpRHS)))))
5496       return {L.Flavor, SPNB_NA, false};
5497   }
5498   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5499   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5500   if (C == A) {
5501     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5502                                          match(B, m_Not(m_Specific(CmpRHS)))))
5503       return {L.Flavor, SPNB_NA, false};
5504   }
5505 
5506   return {SPF_UNKNOWN, SPNB_NA, false};
5507 }
5508 
5509 /// If the input value is the result of a 'not' op, constant integer, or vector
5510 /// splat of a constant integer, return the bitwise-not source value.
5511 /// TODO: This could be extended to handle non-splat vector integer constants.
5512 static Value *getNotValue(Value *V) {
5513   Value *NotV;
5514   if (match(V, m_Not(m_Value(NotV))))
5515     return NotV;
5516 
5517   const APInt *C;
5518   if (match(V, m_APInt(C)))
5519     return ConstantInt::get(V->getType(), ~(*C));
5520 
5521   return nullptr;
5522 }
5523 
5524 /// Match non-obvious integer minimum and maximum sequences.
5525 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5526                                        Value *CmpLHS, Value *CmpRHS,
5527                                        Value *TrueVal, Value *FalseVal,
5528                                        Value *&LHS, Value *&RHS,
5529                                        unsigned Depth) {
5530   // Assume success. If there's no match, callers should not use these anyway.
5531   LHS = TrueVal;
5532   RHS = FalseVal;
5533 
5534   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5535   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5536     return SPR;
5537 
5538   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5539   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5540     return SPR;
5541 
5542   // Look through 'not' ops to find disguised min/max.
5543   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5544   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5545   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5546     switch (Pred) {
5547     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5548     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5549     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5550     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5551     default: break;
5552     }
5553   }
5554 
5555   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5556   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5557   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5558     switch (Pred) {
5559     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5560     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5561     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5562     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5563     default: break;
5564     }
5565   }
5566 
5567   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5568     return {SPF_UNKNOWN, SPNB_NA, false};
5569 
5570   // Z = X -nsw Y
5571   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5572   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5573   if (match(TrueVal, m_Zero()) &&
5574       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5575     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5576 
5577   // Z = X -nsw Y
5578   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5579   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5580   if (match(FalseVal, m_Zero()) &&
5581       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5582     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5583 
5584   const APInt *C1;
5585   if (!match(CmpRHS, m_APInt(C1)))
5586     return {SPF_UNKNOWN, SPNB_NA, false};
5587 
5588   // An unsigned min/max can be written with a signed compare.
5589   const APInt *C2;
5590   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5591       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5592     // Is the sign bit set?
5593     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5594     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5595     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5596         C2->isMaxSignedValue())
5597       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5598 
5599     // Is the sign bit clear?
5600     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5601     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5602     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5603         C2->isMinSignedValue())
5604       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5605   }
5606 
5607   return {SPF_UNKNOWN, SPNB_NA, false};
5608 }
5609 
5610 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5611   assert(X && Y && "Invalid operand");
5612 
5613   // X = sub (0, Y) || X = sub nsw (0, Y)
5614   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5615       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5616     return true;
5617 
5618   // Y = sub (0, X) || Y = sub nsw (0, X)
5619   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5620       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5621     return true;
5622 
5623   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5624   Value *A, *B;
5625   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5626                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5627          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5628                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5629 }
5630 
5631 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5632                                               FastMathFlags FMF,
5633                                               Value *CmpLHS, Value *CmpRHS,
5634                                               Value *TrueVal, Value *FalseVal,
5635                                               Value *&LHS, Value *&RHS,
5636                                               unsigned Depth) {
5637   if (CmpInst::isFPPredicate(Pred)) {
5638     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5639     // 0.0 operand, set the compare's 0.0 operands to that same value for the
5640     // purpose of identifying min/max. Disregard vector constants with undefined
5641     // elements because those can not be back-propagated for analysis.
5642     Value *OutputZeroVal = nullptr;
5643     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5644         !cast<Constant>(TrueVal)->containsUndefElement())
5645       OutputZeroVal = TrueVal;
5646     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5647              !cast<Constant>(FalseVal)->containsUndefElement())
5648       OutputZeroVal = FalseVal;
5649 
5650     if (OutputZeroVal) {
5651       if (match(CmpLHS, m_AnyZeroFP()))
5652         CmpLHS = OutputZeroVal;
5653       if (match(CmpRHS, m_AnyZeroFP()))
5654         CmpRHS = OutputZeroVal;
5655     }
5656   }
5657 
5658   LHS = CmpLHS;
5659   RHS = CmpRHS;
5660 
5661   // Signed zero may return inconsistent results between implementations.
5662   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5663   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5664   // Therefore, we behave conservatively and only proceed if at least one of the
5665   // operands is known to not be zero or if we don't care about signed zero.
5666   switch (Pred) {
5667   default: break;
5668   // FIXME: Include OGT/OLT/UGT/ULT.
5669   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5670   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5671     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5672         !isKnownNonZero(CmpRHS))
5673       return {SPF_UNKNOWN, SPNB_NA, false};
5674   }
5675 
5676   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5677   bool Ordered = false;
5678 
5679   // When given one NaN and one non-NaN input:
5680   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5681   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5682   //     ordered comparison fails), which could be NaN or non-NaN.
5683   // so here we discover exactly what NaN behavior is required/accepted.
5684   if (CmpInst::isFPPredicate(Pred)) {
5685     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5686     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5687 
5688     if (LHSSafe && RHSSafe) {
5689       // Both operands are known non-NaN.
5690       NaNBehavior = SPNB_RETURNS_ANY;
5691     } else if (CmpInst::isOrdered(Pred)) {
5692       // An ordered comparison will return false when given a NaN, so it
5693       // returns the RHS.
5694       Ordered = true;
5695       if (LHSSafe)
5696         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5697         NaNBehavior = SPNB_RETURNS_NAN;
5698       else if (RHSSafe)
5699         NaNBehavior = SPNB_RETURNS_OTHER;
5700       else
5701         // Completely unsafe.
5702         return {SPF_UNKNOWN, SPNB_NA, false};
5703     } else {
5704       Ordered = false;
5705       // An unordered comparison will return true when given a NaN, so it
5706       // returns the LHS.
5707       if (LHSSafe)
5708         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5709         NaNBehavior = SPNB_RETURNS_OTHER;
5710       else if (RHSSafe)
5711         NaNBehavior = SPNB_RETURNS_NAN;
5712       else
5713         // Completely unsafe.
5714         return {SPF_UNKNOWN, SPNB_NA, false};
5715     }
5716   }
5717 
5718   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5719     std::swap(CmpLHS, CmpRHS);
5720     Pred = CmpInst::getSwappedPredicate(Pred);
5721     if (NaNBehavior == SPNB_RETURNS_NAN)
5722       NaNBehavior = SPNB_RETURNS_OTHER;
5723     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5724       NaNBehavior = SPNB_RETURNS_NAN;
5725     Ordered = !Ordered;
5726   }
5727 
5728   // ([if]cmp X, Y) ? X : Y
5729   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5730     switch (Pred) {
5731     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5732     case ICmpInst::ICMP_UGT:
5733     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5734     case ICmpInst::ICMP_SGT:
5735     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5736     case ICmpInst::ICMP_ULT:
5737     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5738     case ICmpInst::ICMP_SLT:
5739     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5740     case FCmpInst::FCMP_UGT:
5741     case FCmpInst::FCMP_UGE:
5742     case FCmpInst::FCMP_OGT:
5743     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5744     case FCmpInst::FCMP_ULT:
5745     case FCmpInst::FCMP_ULE:
5746     case FCmpInst::FCMP_OLT:
5747     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5748     }
5749   }
5750 
5751   if (isKnownNegation(TrueVal, FalseVal)) {
5752     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5753     // match against either LHS or sext(LHS).
5754     auto MaybeSExtCmpLHS =
5755         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5756     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5757     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5758     if (match(TrueVal, MaybeSExtCmpLHS)) {
5759       // Set the return values. If the compare uses the negated value (-X >s 0),
5760       // swap the return values because the negated value is always 'RHS'.
5761       LHS = TrueVal;
5762       RHS = FalseVal;
5763       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5764         std::swap(LHS, RHS);
5765 
5766       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5767       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5768       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5769         return {SPF_ABS, SPNB_NA, false};
5770 
5771       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5772       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5773         return {SPF_ABS, SPNB_NA, false};
5774 
5775       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5776       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5777       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5778         return {SPF_NABS, SPNB_NA, false};
5779     }
5780     else if (match(FalseVal, MaybeSExtCmpLHS)) {
5781       // Set the return values. If the compare uses the negated value (-X >s 0),
5782       // swap the return values because the negated value is always 'RHS'.
5783       LHS = FalseVal;
5784       RHS = TrueVal;
5785       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5786         std::swap(LHS, RHS);
5787 
5788       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5789       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5790       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5791         return {SPF_NABS, SPNB_NA, false};
5792 
5793       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5794       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5795       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5796         return {SPF_ABS, SPNB_NA, false};
5797     }
5798   }
5799 
5800   if (CmpInst::isIntPredicate(Pred))
5801     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5802 
5803   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5804   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5805   // semantics than minNum. Be conservative in such case.
5806   if (NaNBehavior != SPNB_RETURNS_ANY ||
5807       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5808        !isKnownNonZero(CmpRHS)))
5809     return {SPF_UNKNOWN, SPNB_NA, false};
5810 
5811   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5812 }
5813 
5814 /// Helps to match a select pattern in case of a type mismatch.
5815 ///
5816 /// The function processes the case when type of true and false values of a
5817 /// select instruction differs from type of the cmp instruction operands because
5818 /// of a cast instruction. The function checks if it is legal to move the cast
5819 /// operation after "select". If yes, it returns the new second value of
5820 /// "select" (with the assumption that cast is moved):
5821 /// 1. As operand of cast instruction when both values of "select" are same cast
5822 /// instructions.
5823 /// 2. As restored constant (by applying reverse cast operation) when the first
5824 /// value of the "select" is a cast operation and the second value is a
5825 /// constant.
5826 /// NOTE: We return only the new second value because the first value could be
5827 /// accessed as operand of cast instruction.
5828 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5829                               Instruction::CastOps *CastOp) {
5830   auto *Cast1 = dyn_cast<CastInst>(V1);
5831   if (!Cast1)
5832     return nullptr;
5833 
5834   *CastOp = Cast1->getOpcode();
5835   Type *SrcTy = Cast1->getSrcTy();
5836   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5837     // If V1 and V2 are both the same cast from the same type, look through V1.
5838     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5839       return Cast2->getOperand(0);
5840     return nullptr;
5841   }
5842 
5843   auto *C = dyn_cast<Constant>(V2);
5844   if (!C)
5845     return nullptr;
5846 
5847   Constant *CastedTo = nullptr;
5848   switch (*CastOp) {
5849   case Instruction::ZExt:
5850     if (CmpI->isUnsigned())
5851       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5852     break;
5853   case Instruction::SExt:
5854     if (CmpI->isSigned())
5855       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5856     break;
5857   case Instruction::Trunc:
5858     Constant *CmpConst;
5859     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5860         CmpConst->getType() == SrcTy) {
5861       // Here we have the following case:
5862       //
5863       //   %cond = cmp iN %x, CmpConst
5864       //   %tr = trunc iN %x to iK
5865       //   %narrowsel = select i1 %cond, iK %t, iK C
5866       //
5867       // We can always move trunc after select operation:
5868       //
5869       //   %cond = cmp iN %x, CmpConst
5870       //   %widesel = select i1 %cond, iN %x, iN CmpConst
5871       //   %tr = trunc iN %widesel to iK
5872       //
5873       // Note that C could be extended in any way because we don't care about
5874       // upper bits after truncation. It can't be abs pattern, because it would
5875       // look like:
5876       //
5877       //   select i1 %cond, x, -x.
5878       //
5879       // So only min/max pattern could be matched. Such match requires widened C
5880       // == CmpConst. That is why set widened C = CmpConst, condition trunc
5881       // CmpConst == C is checked below.
5882       CastedTo = CmpConst;
5883     } else {
5884       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5885     }
5886     break;
5887   case Instruction::FPTrunc:
5888     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5889     break;
5890   case Instruction::FPExt:
5891     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5892     break;
5893   case Instruction::FPToUI:
5894     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5895     break;
5896   case Instruction::FPToSI:
5897     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5898     break;
5899   case Instruction::UIToFP:
5900     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5901     break;
5902   case Instruction::SIToFP:
5903     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5904     break;
5905   default:
5906     break;
5907   }
5908 
5909   if (!CastedTo)
5910     return nullptr;
5911 
5912   // Make sure the cast doesn't lose any information.
5913   Constant *CastedBack =
5914       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5915   if (CastedBack != C)
5916     return nullptr;
5917 
5918   return CastedTo;
5919 }
5920 
5921 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5922                                              Instruction::CastOps *CastOp,
5923                                              unsigned Depth) {
5924   if (Depth >= MaxAnalysisRecursionDepth)
5925     return {SPF_UNKNOWN, SPNB_NA, false};
5926 
5927   SelectInst *SI = dyn_cast<SelectInst>(V);
5928   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5929 
5930   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5931   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5932 
5933   Value *TrueVal = SI->getTrueValue();
5934   Value *FalseVal = SI->getFalseValue();
5935 
5936   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5937                                             CastOp, Depth);
5938 }
5939 
5940 SelectPatternResult llvm::matchDecomposedSelectPattern(
5941     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5942     Instruction::CastOps *CastOp, unsigned Depth) {
5943   CmpInst::Predicate Pred = CmpI->getPredicate();
5944   Value *CmpLHS = CmpI->getOperand(0);
5945   Value *CmpRHS = CmpI->getOperand(1);
5946   FastMathFlags FMF;
5947   if (isa<FPMathOperator>(CmpI))
5948     FMF = CmpI->getFastMathFlags();
5949 
5950   // Bail out early.
5951   if (CmpI->isEquality())
5952     return {SPF_UNKNOWN, SPNB_NA, false};
5953 
5954   // Deal with type mismatches.
5955   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5956     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5957       // If this is a potential fmin/fmax with a cast to integer, then ignore
5958       // -0.0 because there is no corresponding integer value.
5959       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5960         FMF.setNoSignedZeros();
5961       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5962                                   cast<CastInst>(TrueVal)->getOperand(0), C,
5963                                   LHS, RHS, Depth);
5964     }
5965     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5966       // If this is a potential fmin/fmax with a cast to integer, then ignore
5967       // -0.0 because there is no corresponding integer value.
5968       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5969         FMF.setNoSignedZeros();
5970       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5971                                   C, cast<CastInst>(FalseVal)->getOperand(0),
5972                                   LHS, RHS, Depth);
5973     }
5974   }
5975   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5976                               LHS, RHS, Depth);
5977 }
5978 
5979 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5980   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5981   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5982   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5983   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5984   if (SPF == SPF_FMINNUM)
5985     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5986   if (SPF == SPF_FMAXNUM)
5987     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5988   llvm_unreachable("unhandled!");
5989 }
5990 
5991 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5992   if (SPF == SPF_SMIN) return SPF_SMAX;
5993   if (SPF == SPF_UMIN) return SPF_UMAX;
5994   if (SPF == SPF_SMAX) return SPF_SMIN;
5995   if (SPF == SPF_UMAX) return SPF_UMIN;
5996   llvm_unreachable("unhandled!");
5997 }
5998 
5999 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6000   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6001 }
6002 
6003 /// Return true if "icmp Pred LHS RHS" is always true.
6004 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6005                             const Value *RHS, const DataLayout &DL,
6006                             unsigned Depth) {
6007   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
6008   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6009     return true;
6010 
6011   switch (Pred) {
6012   default:
6013     return false;
6014 
6015   case CmpInst::ICMP_SLE: {
6016     const APInt *C;
6017 
6018     // LHS s<= LHS +_{nsw} C   if C >= 0
6019     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6020       return !C->isNegative();
6021     return false;
6022   }
6023 
6024   case CmpInst::ICMP_ULE: {
6025     const APInt *C;
6026 
6027     // LHS u<= LHS +_{nuw} C   for any C
6028     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6029       return true;
6030 
6031     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6032     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6033                                        const Value *&X,
6034                                        const APInt *&CA, const APInt *&CB) {
6035       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6036           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6037         return true;
6038 
6039       // If X & C == 0 then (X | C) == X +_{nuw} C
6040       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6041           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6042         KnownBits Known(CA->getBitWidth());
6043         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6044                          /*CxtI*/ nullptr, /*DT*/ nullptr);
6045         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6046           return true;
6047       }
6048 
6049       return false;
6050     };
6051 
6052     const Value *X;
6053     const APInt *CLHS, *CRHS;
6054     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6055       return CLHS->ule(*CRHS);
6056 
6057     return false;
6058   }
6059   }
6060 }
6061 
6062 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6063 /// ALHS ARHS" is true.  Otherwise, return None.
6064 static Optional<bool>
6065 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6066                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
6067                       const DataLayout &DL, unsigned Depth) {
6068   switch (Pred) {
6069   default:
6070     return None;
6071 
6072   case CmpInst::ICMP_SLT:
6073   case CmpInst::ICMP_SLE:
6074     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6075         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6076       return true;
6077     return None;
6078 
6079   case CmpInst::ICMP_ULT:
6080   case CmpInst::ICMP_ULE:
6081     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6082         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6083       return true;
6084     return None;
6085   }
6086 }
6087 
6088 /// Return true if the operands of the two compares match.  IsSwappedOps is true
6089 /// when the operands match, but are swapped.
6090 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6091                           const Value *BLHS, const Value *BRHS,
6092                           bool &IsSwappedOps) {
6093 
6094   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6095   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6096   return IsMatchingOps || IsSwappedOps;
6097 }
6098 
6099 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6100 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6101 /// Otherwise, return None if we can't infer anything.
6102 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6103                                                     CmpInst::Predicate BPred,
6104                                                     bool AreSwappedOps) {
6105   // Canonicalize the predicate as if the operands were not commuted.
6106   if (AreSwappedOps)
6107     BPred = ICmpInst::getSwappedPredicate(BPred);
6108 
6109   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6110     return true;
6111   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6112     return false;
6113 
6114   return None;
6115 }
6116 
6117 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6118 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6119 /// Otherwise, return None if we can't infer anything.
6120 static Optional<bool>
6121 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6122                                  const ConstantInt *C1,
6123                                  CmpInst::Predicate BPred,
6124                                  const ConstantInt *C2) {
6125   ConstantRange DomCR =
6126       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6127   ConstantRange CR =
6128       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6129   ConstantRange Intersection = DomCR.intersectWith(CR);
6130   ConstantRange Difference = DomCR.difference(CR);
6131   if (Intersection.isEmptySet())
6132     return false;
6133   if (Difference.isEmptySet())
6134     return true;
6135   return None;
6136 }
6137 
6138 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6139 /// false.  Otherwise, return None if we can't infer anything.
6140 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6141                                          CmpInst::Predicate BPred,
6142                                          const Value *BLHS, const Value *BRHS,
6143                                          const DataLayout &DL, bool LHSIsTrue,
6144                                          unsigned Depth) {
6145   Value *ALHS = LHS->getOperand(0);
6146   Value *ARHS = LHS->getOperand(1);
6147 
6148   // The rest of the logic assumes the LHS condition is true.  If that's not the
6149   // case, invert the predicate to make it so.
6150   CmpInst::Predicate APred =
6151       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6152 
6153   // Can we infer anything when the two compares have matching operands?
6154   bool AreSwappedOps;
6155   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6156     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6157             APred, BPred, AreSwappedOps))
6158       return Implication;
6159     // No amount of additional analysis will infer the second condition, so
6160     // early exit.
6161     return None;
6162   }
6163 
6164   // Can we infer anything when the LHS operands match and the RHS operands are
6165   // constants (not necessarily matching)?
6166   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6167     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6168             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6169       return Implication;
6170     // No amount of additional analysis will infer the second condition, so
6171     // early exit.
6172     return None;
6173   }
6174 
6175   if (APred == BPred)
6176     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6177   return None;
6178 }
6179 
6180 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6181 /// false.  Otherwise, return None if we can't infer anything.  We expect the
6182 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
6183 static Optional<bool>
6184 isImpliedCondAndOr(const BinaryOperator *LHS, CmpInst::Predicate RHSPred,
6185                    const Value *RHSOp0, const Value *RHSOp1,
6186 
6187                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6188   // The LHS must be an 'or' or an 'and' instruction.
6189   assert((LHS->getOpcode() == Instruction::And ||
6190           LHS->getOpcode() == Instruction::Or) &&
6191          "Expected LHS to be 'and' or 'or'.");
6192 
6193   assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6194 
6195   // If the result of an 'or' is false, then we know both legs of the 'or' are
6196   // false.  Similarly, if the result of an 'and' is true, then we know both
6197   // legs of the 'and' are true.
6198   Value *ALHS, *ARHS;
6199   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
6200       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
6201     // FIXME: Make this non-recursion.
6202     if (Optional<bool> Implication = isImpliedCondition(
6203             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6204       return Implication;
6205     if (Optional<bool> Implication = isImpliedCondition(
6206             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6207       return Implication;
6208     return None;
6209   }
6210   return None;
6211 }
6212 
6213 Optional<bool>
6214 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6215                          const Value *RHSOp0, const Value *RHSOp1,
6216                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6217   // Bail out when we hit the limit.
6218   if (Depth == MaxAnalysisRecursionDepth)
6219     return None;
6220 
6221   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6222   // example.
6223   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6224     return None;
6225 
6226   Type *OpTy = LHS->getType();
6227   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6228 
6229   // FIXME: Extending the code below to handle vectors.
6230   if (OpTy->isVectorTy())
6231     return None;
6232 
6233   assert(OpTy->isIntegerTy(1) && "implied by above");
6234 
6235   // Both LHS and RHS are icmps.
6236   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6237   if (LHSCmp)
6238     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6239                               Depth);
6240 
6241   /// The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to
6242   /// be / an icmp. FIXME: Add support for and/or on the RHS.
6243   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
6244   if (LHSBO) {
6245     if ((LHSBO->getOpcode() == Instruction::And ||
6246          LHSBO->getOpcode() == Instruction::Or))
6247       return isImpliedCondAndOr(LHSBO, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6248                                 Depth);
6249   }
6250   return None;
6251 }
6252 
6253 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6254                                         const DataLayout &DL, bool LHSIsTrue,
6255                                         unsigned Depth) {
6256   // LHS ==> RHS by definition
6257   if (LHS == RHS)
6258     return LHSIsTrue;
6259 
6260   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6261   if (RHSCmp)
6262     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6263                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6264                               LHSIsTrue, Depth);
6265   return None;
6266 }
6267 
6268 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6269 // condition dominating ContextI or nullptr, if no condition is found.
6270 static std::pair<Value *, bool>
6271 getDomPredecessorCondition(const Instruction *ContextI) {
6272   if (!ContextI || !ContextI->getParent())
6273     return {nullptr, false};
6274 
6275   // TODO: This is a poor/cheap way to determine dominance. Should we use a
6276   // dominator tree (eg, from a SimplifyQuery) instead?
6277   const BasicBlock *ContextBB = ContextI->getParent();
6278   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6279   if (!PredBB)
6280     return {nullptr, false};
6281 
6282   // We need a conditional branch in the predecessor.
6283   Value *PredCond;
6284   BasicBlock *TrueBB, *FalseBB;
6285   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6286     return {nullptr, false};
6287 
6288   // The branch should get simplified. Don't bother simplifying this condition.
6289   if (TrueBB == FalseBB)
6290     return {nullptr, false};
6291 
6292   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6293          "Predecessor block does not point to successor?");
6294 
6295   // Is this condition implied by the predecessor condition?
6296   return {PredCond, TrueBB == ContextBB};
6297 }
6298 
6299 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6300                                              const Instruction *ContextI,
6301                                              const DataLayout &DL) {
6302   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6303   auto PredCond = getDomPredecessorCondition(ContextI);
6304   if (PredCond.first)
6305     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6306   return None;
6307 }
6308 
6309 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6310                                              const Value *LHS, const Value *RHS,
6311                                              const Instruction *ContextI,
6312                                              const DataLayout &DL) {
6313   auto PredCond = getDomPredecessorCondition(ContextI);
6314   if (PredCond.first)
6315     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6316                               PredCond.second);
6317   return None;
6318 }
6319 
6320 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6321                               APInt &Upper, const InstrInfoQuery &IIQ) {
6322   unsigned Width = Lower.getBitWidth();
6323   const APInt *C;
6324   switch (BO.getOpcode()) {
6325   case Instruction::Add:
6326     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6327       // FIXME: If we have both nuw and nsw, we should reduce the range further.
6328       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6329         // 'add nuw x, C' produces [C, UINT_MAX].
6330         Lower = *C;
6331       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6332         if (C->isNegative()) {
6333           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6334           Lower = APInt::getSignedMinValue(Width);
6335           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6336         } else {
6337           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6338           Lower = APInt::getSignedMinValue(Width) + *C;
6339           Upper = APInt::getSignedMaxValue(Width) + 1;
6340         }
6341       }
6342     }
6343     break;
6344 
6345   case Instruction::And:
6346     if (match(BO.getOperand(1), m_APInt(C)))
6347       // 'and x, C' produces [0, C].
6348       Upper = *C + 1;
6349     break;
6350 
6351   case Instruction::Or:
6352     if (match(BO.getOperand(1), m_APInt(C)))
6353       // 'or x, C' produces [C, UINT_MAX].
6354       Lower = *C;
6355     break;
6356 
6357   case Instruction::AShr:
6358     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6359       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6360       Lower = APInt::getSignedMinValue(Width).ashr(*C);
6361       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6362     } else if (match(BO.getOperand(0), m_APInt(C))) {
6363       unsigned ShiftAmount = Width - 1;
6364       if (!C->isNullValue() && IIQ.isExact(&BO))
6365         ShiftAmount = C->countTrailingZeros();
6366       if (C->isNegative()) {
6367         // 'ashr C, x' produces [C, C >> (Width-1)]
6368         Lower = *C;
6369         Upper = C->ashr(ShiftAmount) + 1;
6370       } else {
6371         // 'ashr C, x' produces [C >> (Width-1), C]
6372         Lower = C->ashr(ShiftAmount);
6373         Upper = *C + 1;
6374       }
6375     }
6376     break;
6377 
6378   case Instruction::LShr:
6379     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6380       // 'lshr x, C' produces [0, UINT_MAX >> C].
6381       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6382     } else if (match(BO.getOperand(0), m_APInt(C))) {
6383       // 'lshr C, x' produces [C >> (Width-1), C].
6384       unsigned ShiftAmount = Width - 1;
6385       if (!C->isNullValue() && IIQ.isExact(&BO))
6386         ShiftAmount = C->countTrailingZeros();
6387       Lower = C->lshr(ShiftAmount);
6388       Upper = *C + 1;
6389     }
6390     break;
6391 
6392   case Instruction::Shl:
6393     if (match(BO.getOperand(0), m_APInt(C))) {
6394       if (IIQ.hasNoUnsignedWrap(&BO)) {
6395         // 'shl nuw C, x' produces [C, C << CLZ(C)]
6396         Lower = *C;
6397         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6398       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6399         if (C->isNegative()) {
6400           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6401           unsigned ShiftAmount = C->countLeadingOnes() - 1;
6402           Lower = C->shl(ShiftAmount);
6403           Upper = *C + 1;
6404         } else {
6405           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6406           unsigned ShiftAmount = C->countLeadingZeros() - 1;
6407           Lower = *C;
6408           Upper = C->shl(ShiftAmount) + 1;
6409         }
6410       }
6411     }
6412     break;
6413 
6414   case Instruction::SDiv:
6415     if (match(BO.getOperand(1), m_APInt(C))) {
6416       APInt IntMin = APInt::getSignedMinValue(Width);
6417       APInt IntMax = APInt::getSignedMaxValue(Width);
6418       if (C->isAllOnesValue()) {
6419         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6420         //    where C != -1 and C != 0 and C != 1
6421         Lower = IntMin + 1;
6422         Upper = IntMax + 1;
6423       } else if (C->countLeadingZeros() < Width - 1) {
6424         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6425         //    where C != -1 and C != 0 and C != 1
6426         Lower = IntMin.sdiv(*C);
6427         Upper = IntMax.sdiv(*C);
6428         if (Lower.sgt(Upper))
6429           std::swap(Lower, Upper);
6430         Upper = Upper + 1;
6431         assert(Upper != Lower && "Upper part of range has wrapped!");
6432       }
6433     } else if (match(BO.getOperand(0), m_APInt(C))) {
6434       if (C->isMinSignedValue()) {
6435         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6436         Lower = *C;
6437         Upper = Lower.lshr(1) + 1;
6438       } else {
6439         // 'sdiv C, x' produces [-|C|, |C|].
6440         Upper = C->abs() + 1;
6441         Lower = (-Upper) + 1;
6442       }
6443     }
6444     break;
6445 
6446   case Instruction::UDiv:
6447     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6448       // 'udiv x, C' produces [0, UINT_MAX / C].
6449       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6450     } else if (match(BO.getOperand(0), m_APInt(C))) {
6451       // 'udiv C, x' produces [0, C].
6452       Upper = *C + 1;
6453     }
6454     break;
6455 
6456   case Instruction::SRem:
6457     if (match(BO.getOperand(1), m_APInt(C))) {
6458       // 'srem x, C' produces (-|C|, |C|).
6459       Upper = C->abs();
6460       Lower = (-Upper) + 1;
6461     }
6462     break;
6463 
6464   case Instruction::URem:
6465     if (match(BO.getOperand(1), m_APInt(C)))
6466       // 'urem x, C' produces [0, C).
6467       Upper = *C;
6468     break;
6469 
6470   default:
6471     break;
6472   }
6473 }
6474 
6475 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6476                                   APInt &Upper) {
6477   unsigned Width = Lower.getBitWidth();
6478   const APInt *C;
6479   switch (II.getIntrinsicID()) {
6480   case Intrinsic::uadd_sat:
6481     // uadd.sat(x, C) produces [C, UINT_MAX].
6482     if (match(II.getOperand(0), m_APInt(C)) ||
6483         match(II.getOperand(1), m_APInt(C)))
6484       Lower = *C;
6485     break;
6486   case Intrinsic::sadd_sat:
6487     if (match(II.getOperand(0), m_APInt(C)) ||
6488         match(II.getOperand(1), m_APInt(C))) {
6489       if (C->isNegative()) {
6490         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6491         Lower = APInt::getSignedMinValue(Width);
6492         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6493       } else {
6494         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6495         Lower = APInt::getSignedMinValue(Width) + *C;
6496         Upper = APInt::getSignedMaxValue(Width) + 1;
6497       }
6498     }
6499     break;
6500   case Intrinsic::usub_sat:
6501     // usub.sat(C, x) produces [0, C].
6502     if (match(II.getOperand(0), m_APInt(C)))
6503       Upper = *C + 1;
6504     // usub.sat(x, C) produces [0, UINT_MAX - C].
6505     else if (match(II.getOperand(1), m_APInt(C)))
6506       Upper = APInt::getMaxValue(Width) - *C + 1;
6507     break;
6508   case Intrinsic::ssub_sat:
6509     if (match(II.getOperand(0), m_APInt(C))) {
6510       if (C->isNegative()) {
6511         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6512         Lower = APInt::getSignedMinValue(Width);
6513         Upper = *C - APInt::getSignedMinValue(Width) + 1;
6514       } else {
6515         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6516         Lower = *C - APInt::getSignedMaxValue(Width);
6517         Upper = APInt::getSignedMaxValue(Width) + 1;
6518       }
6519     } else if (match(II.getOperand(1), m_APInt(C))) {
6520       if (C->isNegative()) {
6521         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6522         Lower = APInt::getSignedMinValue(Width) - *C;
6523         Upper = APInt::getSignedMaxValue(Width) + 1;
6524       } else {
6525         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6526         Lower = APInt::getSignedMinValue(Width);
6527         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6528       }
6529     }
6530     break;
6531   case Intrinsic::umin:
6532   case Intrinsic::umax:
6533   case Intrinsic::smin:
6534   case Intrinsic::smax:
6535     if (!match(II.getOperand(0), m_APInt(C)) &&
6536         !match(II.getOperand(1), m_APInt(C)))
6537       break;
6538 
6539     switch (II.getIntrinsicID()) {
6540     case Intrinsic::umin:
6541       Upper = *C + 1;
6542       break;
6543     case Intrinsic::umax:
6544       Lower = *C;
6545       break;
6546     case Intrinsic::smin:
6547       Lower = APInt::getSignedMinValue(Width);
6548       Upper = *C + 1;
6549       break;
6550     case Intrinsic::smax:
6551       Lower = *C;
6552       Upper = APInt::getSignedMaxValue(Width) + 1;
6553       break;
6554     default:
6555       llvm_unreachable("Must be min/max intrinsic");
6556     }
6557     break;
6558   case Intrinsic::abs:
6559     // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6560     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6561     if (match(II.getOperand(1), m_One()))
6562       Upper = APInt::getSignedMaxValue(Width) + 1;
6563     else
6564       Upper = APInt::getSignedMinValue(Width) + 1;
6565     break;
6566   default:
6567     break;
6568   }
6569 }
6570 
6571 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6572                                       APInt &Upper, const InstrInfoQuery &IIQ) {
6573   const Value *LHS = nullptr, *RHS = nullptr;
6574   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6575   if (R.Flavor == SPF_UNKNOWN)
6576     return;
6577 
6578   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6579 
6580   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6581     // If the negation part of the abs (in RHS) has the NSW flag,
6582     // then the result of abs(X) is [0..SIGNED_MAX],
6583     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6584     Lower = APInt::getNullValue(BitWidth);
6585     if (match(RHS, m_Neg(m_Specific(LHS))) &&
6586         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6587       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6588     else
6589       Upper = APInt::getSignedMinValue(BitWidth) + 1;
6590     return;
6591   }
6592 
6593   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6594     // The result of -abs(X) is <= 0.
6595     Lower = APInt::getSignedMinValue(BitWidth);
6596     Upper = APInt(BitWidth, 1);
6597     return;
6598   }
6599 
6600   const APInt *C;
6601   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6602     return;
6603 
6604   switch (R.Flavor) {
6605     case SPF_UMIN:
6606       Upper = *C + 1;
6607       break;
6608     case SPF_UMAX:
6609       Lower = *C;
6610       break;
6611     case SPF_SMIN:
6612       Lower = APInt::getSignedMinValue(BitWidth);
6613       Upper = *C + 1;
6614       break;
6615     case SPF_SMAX:
6616       Lower = *C;
6617       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6618       break;
6619     default:
6620       break;
6621   }
6622 }
6623 
6624 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6625                                          AssumptionCache *AC,
6626                                          const Instruction *CtxI,
6627                                          unsigned Depth) {
6628   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6629 
6630   if (Depth == MaxAnalysisRecursionDepth)
6631     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6632 
6633   const APInt *C;
6634   if (match(V, m_APInt(C)))
6635     return ConstantRange(*C);
6636 
6637   InstrInfoQuery IIQ(UseInstrInfo);
6638   unsigned BitWidth = V->getType()->getScalarSizeInBits();
6639   APInt Lower = APInt(BitWidth, 0);
6640   APInt Upper = APInt(BitWidth, 0);
6641   if (auto *BO = dyn_cast<BinaryOperator>(V))
6642     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6643   else if (auto *II = dyn_cast<IntrinsicInst>(V))
6644     setLimitsForIntrinsic(*II, Lower, Upper);
6645   else if (auto *SI = dyn_cast<SelectInst>(V))
6646     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6647 
6648   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6649 
6650   if (auto *I = dyn_cast<Instruction>(V))
6651     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6652       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6653 
6654   if (CtxI && AC) {
6655     // Try to restrict the range based on information from assumptions.
6656     for (auto &AssumeVH : AC->assumptionsFor(V)) {
6657       if (!AssumeVH)
6658         continue;
6659       CallInst *I = cast<CallInst>(AssumeVH);
6660       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6661              "Got assumption for the wrong function!");
6662       assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6663              "must be an assume intrinsic");
6664 
6665       if (!isValidAssumeForContext(I, CtxI, nullptr))
6666         continue;
6667       Value *Arg = I->getArgOperand(0);
6668       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6669       // Currently we just use information from comparisons.
6670       if (!Cmp || Cmp->getOperand(0) != V)
6671         continue;
6672       ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6673                                                AC, I, Depth + 1);
6674       CR = CR.intersectWith(
6675           ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6676     }
6677   }
6678 
6679   return CR;
6680 }
6681 
6682 static Optional<int64_t>
6683 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6684   // Skip over the first indices.
6685   gep_type_iterator GTI = gep_type_begin(GEP);
6686   for (unsigned i = 1; i != Idx; ++i, ++GTI)
6687     /*skip along*/;
6688 
6689   // Compute the offset implied by the rest of the indices.
6690   int64_t Offset = 0;
6691   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
6692     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
6693     if (!OpC)
6694       return None;
6695     if (OpC->isZero())
6696       continue; // No offset.
6697 
6698     // Handle struct indices, which add their field offset to the pointer.
6699     if (StructType *STy = GTI.getStructTypeOrNull()) {
6700       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
6701       continue;
6702     }
6703 
6704     // Otherwise, we have a sequential type like an array or fixed-length
6705     // vector. Multiply the index by the ElementSize.
6706     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
6707     if (Size.isScalable())
6708       return None;
6709     Offset += Size.getFixedSize() * OpC->getSExtValue();
6710   }
6711 
6712   return Offset;
6713 }
6714 
6715 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
6716                                         const DataLayout &DL) {
6717   Ptr1 = Ptr1->stripPointerCasts();
6718   Ptr2 = Ptr2->stripPointerCasts();
6719 
6720   // Handle the trivial case first.
6721   if (Ptr1 == Ptr2) {
6722     return 0;
6723   }
6724 
6725   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
6726   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
6727 
6728   // If one pointer is a GEP see if the GEP is a constant offset from the base,
6729   // as in "P" and "gep P, 1".
6730   // Also do this iteratively to handle the the following case:
6731   //   Ptr_t1 = GEP Ptr1, c1
6732   //   Ptr_t2 = GEP Ptr_t1, c2
6733   //   Ptr2 = GEP Ptr_t2, c3
6734   // where we will return c1+c2+c3.
6735   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
6736   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
6737   // are the same, and return the difference between offsets.
6738   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
6739                                  const Value *Ptr) -> Optional<int64_t> {
6740     const GEPOperator *GEP_T = GEP;
6741     int64_t OffsetVal = 0;
6742     bool HasSameBase = false;
6743     while (GEP_T) {
6744       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
6745       if (!Offset)
6746         return None;
6747       OffsetVal += *Offset;
6748       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
6749       if (Op0 == Ptr) {
6750         HasSameBase = true;
6751         break;
6752       }
6753       GEP_T = dyn_cast<GEPOperator>(Op0);
6754     }
6755     if (!HasSameBase)
6756       return None;
6757     return OffsetVal;
6758   };
6759 
6760   if (GEP1) {
6761     auto Offset = getOffsetFromBase(GEP1, Ptr2);
6762     if (Offset)
6763       return -*Offset;
6764   }
6765   if (GEP2) {
6766     auto Offset = getOffsetFromBase(GEP2, Ptr1);
6767     if (Offset)
6768       return Offset;
6769   }
6770 
6771   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
6772   // base.  After that base, they may have some number of common (and
6773   // potentially variable) indices.  After that they handle some constant
6774   // offset, which determines their offset from each other.  At this point, we
6775   // handle no other case.
6776   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
6777     return None;
6778 
6779   // Skip any common indices and track the GEP types.
6780   unsigned Idx = 1;
6781   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
6782     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
6783       break;
6784 
6785   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
6786   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
6787   if (!Offset1 || !Offset2)
6788     return None;
6789   return *Offset2 - *Offset1;
6790 }
6791