1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/GuardUtils.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/Loads.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/IntrinsicsAArch64.h"
55 #include "llvm/IR/IntrinsicsX86.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/Operator.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/User.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Compiler.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/KnownBits.h"
69 #include "llvm/Support/MathExtras.h"
70 #include <algorithm>
71 #include <array>
72 #include <cassert>
73 #include <cstdint>
74 #include <iterator>
75 #include <utility>
76 
77 using namespace llvm;
78 using namespace llvm::PatternMatch;
79 
80 const unsigned MaxDepth = 6;
81 
82 // Controls the number of uses of the value searched for possible
83 // dominating comparisons.
84 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
85                                               cl::Hidden, cl::init(20));
86 
87 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
88 /// returns the element type's bitwidth.
89 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
90   if (unsigned BitWidth = Ty->getScalarSizeInBits())
91     return BitWidth;
92 
93   return DL.getPointerTypeSizeInBits(Ty);
94 }
95 
96 namespace {
97 
98 // Simplifying using an assume can only be done in a particular control-flow
99 // context (the context instruction provides that context). If an assume and
100 // the context instruction are not in the same block then the DT helps in
101 // figuring out if we can use it.
102 struct Query {
103   const DataLayout &DL;
104   AssumptionCache *AC;
105   const Instruction *CxtI;
106   const DominatorTree *DT;
107 
108   // Unlike the other analyses, this may be a nullptr because not all clients
109   // provide it currently.
110   OptimizationRemarkEmitter *ORE;
111 
112   /// Set of assumptions that should be excluded from further queries.
113   /// This is because of the potential for mutual recursion to cause
114   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
115   /// classic case of this is assume(x = y), which will attempt to determine
116   /// bits in x from bits in y, which will attempt to determine bits in y from
117   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
118   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
119   /// (all of which can call computeKnownBits), and so on.
120   std::array<const Value *, MaxDepth> Excluded;
121 
122   /// If true, it is safe to use metadata during simplification.
123   InstrInfoQuery IIQ;
124 
125   unsigned NumExcluded = 0;
126 
127   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
128         const DominatorTree *DT, bool UseInstrInfo,
129         OptimizationRemarkEmitter *ORE = nullptr)
130       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
131 
132   Query(const Query &Q, const Value *NewExcl)
133       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
134         NumExcluded(Q.NumExcluded) {
135     Excluded = Q.Excluded;
136     Excluded[NumExcluded++] = NewExcl;
137     assert(NumExcluded <= Excluded.size());
138   }
139 
140   bool isExcluded(const Value *Value) const {
141     if (NumExcluded == 0)
142       return false;
143     auto End = Excluded.begin() + NumExcluded;
144     return std::find(Excluded.begin(), End, Value) != End;
145   }
146 };
147 
148 } // end anonymous namespace
149 
150 // Given the provided Value and, potentially, a context instruction, return
151 // the preferred context instruction (if any).
152 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
153   // If we've been provided with a context instruction, then use that (provided
154   // it has been inserted).
155   if (CxtI && CxtI->getParent())
156     return CxtI;
157 
158   // If the value is really an already-inserted instruction, then use that.
159   CxtI = dyn_cast<Instruction>(V);
160   if (CxtI && CxtI->getParent())
161     return CxtI;
162 
163   return nullptr;
164 }
165 
166 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
167                                    const APInt &DemandedElts,
168                                    APInt &DemandedLHS, APInt &DemandedRHS) {
169   // The length of scalable vectors is unknown at compile time, thus we
170   // cannot check their values
171   if (isa<ScalableVectorType>(Shuf->getType()))
172     return false;
173 
174   int NumElts =
175       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
176   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
177   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
178   if (DemandedElts.isNullValue())
179     return true;
180   // Simple case of a shuffle with zeroinitializer.
181   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
182     DemandedLHS.setBit(0);
183     return true;
184   }
185   for (int i = 0; i != NumMaskElts; ++i) {
186     if (!DemandedElts[i])
187       continue;
188     int M = Shuf->getMaskValue(i);
189     assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
190 
191     // For undef elements, we don't know anything about the common state of
192     // the shuffle result.
193     if (M == -1)
194       return false;
195     if (M < NumElts)
196       DemandedLHS.setBit(M % NumElts);
197     else
198       DemandedRHS.setBit(M % NumElts);
199   }
200 
201   return true;
202 }
203 
204 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
205                              KnownBits &Known, unsigned Depth, const Query &Q);
206 
207 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
208                              const Query &Q) {
209   // FIXME: We currently have no way to represent the DemandedElts of a scalable
210   // vector
211   if (isa<ScalableVectorType>(V->getType())) {
212     Known.resetAll();
213     return;
214   }
215 
216   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
217   APInt DemandedElts =
218       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
219   computeKnownBits(V, DemandedElts, Known, Depth, Q);
220 }
221 
222 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
223                             const DataLayout &DL, unsigned Depth,
224                             AssumptionCache *AC, const Instruction *CxtI,
225                             const DominatorTree *DT,
226                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
227   ::computeKnownBits(V, Known, Depth,
228                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
229 }
230 
231 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
232                             KnownBits &Known, const DataLayout &DL,
233                             unsigned Depth, AssumptionCache *AC,
234                             const Instruction *CxtI, const DominatorTree *DT,
235                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
236   ::computeKnownBits(V, DemandedElts, Known, Depth,
237                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
238 }
239 
240 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
241                                   unsigned Depth, const Query &Q);
242 
243 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
244                                   const Query &Q);
245 
246 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
247                                  unsigned Depth, AssumptionCache *AC,
248                                  const Instruction *CxtI,
249                                  const DominatorTree *DT,
250                                  OptimizationRemarkEmitter *ORE,
251                                  bool UseInstrInfo) {
252   return ::computeKnownBits(
253       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
254 }
255 
256 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
257                                  const DataLayout &DL, unsigned Depth,
258                                  AssumptionCache *AC, const Instruction *CxtI,
259                                  const DominatorTree *DT,
260                                  OptimizationRemarkEmitter *ORE,
261                                  bool UseInstrInfo) {
262   return ::computeKnownBits(
263       V, DemandedElts, Depth,
264       Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
265 }
266 
267 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
268                                const DataLayout &DL, AssumptionCache *AC,
269                                const Instruction *CxtI, const DominatorTree *DT,
270                                bool UseInstrInfo) {
271   assert(LHS->getType() == RHS->getType() &&
272          "LHS and RHS should have the same type");
273   assert(LHS->getType()->isIntOrIntVectorTy() &&
274          "LHS and RHS should be integers");
275   // Look for an inverted mask: (X & ~M) op (Y & M).
276   Value *M;
277   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
278       match(RHS, m_c_And(m_Specific(M), m_Value())))
279     return true;
280   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
281       match(LHS, m_c_And(m_Specific(M), m_Value())))
282     return true;
283   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
284   KnownBits LHSKnown(IT->getBitWidth());
285   KnownBits RHSKnown(IT->getBitWidth());
286   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
287   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
288   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
289 }
290 
291 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
292   for (const User *U : CxtI->users()) {
293     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
294       if (IC->isEquality())
295         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
296           if (C->isNullValue())
297             continue;
298     return false;
299   }
300   return true;
301 }
302 
303 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
304                                    const Query &Q);
305 
306 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
307                                   bool OrZero, unsigned Depth,
308                                   AssumptionCache *AC, const Instruction *CxtI,
309                                   const DominatorTree *DT, bool UseInstrInfo) {
310   return ::isKnownToBeAPowerOfTwo(
311       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
312 }
313 
314 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
315                            unsigned Depth, const Query &Q);
316 
317 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
318 
319 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
320                           AssumptionCache *AC, const Instruction *CxtI,
321                           const DominatorTree *DT, bool UseInstrInfo) {
322   return ::isKnownNonZero(V, Depth,
323                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
324 }
325 
326 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
327                               unsigned Depth, AssumptionCache *AC,
328                               const Instruction *CxtI, const DominatorTree *DT,
329                               bool UseInstrInfo) {
330   KnownBits Known =
331       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
332   return Known.isNonNegative();
333 }
334 
335 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
336                            AssumptionCache *AC, const Instruction *CxtI,
337                            const DominatorTree *DT, bool UseInstrInfo) {
338   if (auto *CI = dyn_cast<ConstantInt>(V))
339     return CI->getValue().isStrictlyPositive();
340 
341   // TODO: We'd doing two recursive queries here.  We should factor this such
342   // that only a single query is needed.
343   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
344          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
345 }
346 
347 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
348                            AssumptionCache *AC, const Instruction *CxtI,
349                            const DominatorTree *DT, bool UseInstrInfo) {
350   KnownBits Known =
351       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
352   return Known.isNegative();
353 }
354 
355 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
356 
357 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
358                            const DataLayout &DL, AssumptionCache *AC,
359                            const Instruction *CxtI, const DominatorTree *DT,
360                            bool UseInstrInfo) {
361   return ::isKnownNonEqual(V1, V2,
362                            Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
363                                  UseInstrInfo, /*ORE=*/nullptr));
364 }
365 
366 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
367                               const Query &Q);
368 
369 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
370                              const DataLayout &DL, unsigned Depth,
371                              AssumptionCache *AC, const Instruction *CxtI,
372                              const DominatorTree *DT, bool UseInstrInfo) {
373   return ::MaskedValueIsZero(
374       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
375 }
376 
377 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
378                                    unsigned Depth, const Query &Q);
379 
380 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
381                                    const Query &Q) {
382   // FIXME: We currently have no way to represent the DemandedElts of a scalable
383   // vector
384   if (isa<ScalableVectorType>(V->getType()))
385     return 1;
386 
387   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
388   APInt DemandedElts =
389       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
390   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
391 }
392 
393 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
394                                   unsigned Depth, AssumptionCache *AC,
395                                   const Instruction *CxtI,
396                                   const DominatorTree *DT, bool UseInstrInfo) {
397   return ::ComputeNumSignBits(
398       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
399 }
400 
401 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
402                                    bool NSW, const APInt &DemandedElts,
403                                    KnownBits &KnownOut, KnownBits &Known2,
404                                    unsigned Depth, const Query &Q) {
405   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
406 
407   // If one operand is unknown and we have no nowrap information,
408   // the result will be unknown independently of the second operand.
409   if (KnownOut.isUnknown() && !NSW)
410     return;
411 
412   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
413   KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
414 }
415 
416 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
417                                 const APInt &DemandedElts, KnownBits &Known,
418                                 KnownBits &Known2, unsigned Depth,
419                                 const Query &Q) {
420   unsigned BitWidth = Known.getBitWidth();
421   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
422   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
423 
424   bool isKnownNegative = false;
425   bool isKnownNonNegative = false;
426   // If the multiplication is known not to overflow, compute the sign bit.
427   if (NSW) {
428     if (Op0 == Op1) {
429       // The product of a number with itself is non-negative.
430       isKnownNonNegative = true;
431     } else {
432       bool isKnownNonNegativeOp1 = Known.isNonNegative();
433       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
434       bool isKnownNegativeOp1 = Known.isNegative();
435       bool isKnownNegativeOp0 = Known2.isNegative();
436       // The product of two numbers with the same sign is non-negative.
437       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
438         (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
439       // The product of a negative number and a non-negative number is either
440       // negative or zero.
441       if (!isKnownNonNegative)
442         isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
443                            isKnownNonZero(Op0, Depth, Q)) ||
444                           (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
445                            isKnownNonZero(Op1, Depth, Q));
446     }
447   }
448 
449   assert(!Known.hasConflict() && !Known2.hasConflict());
450   // Compute a conservative estimate for high known-0 bits.
451   unsigned LeadZ =  std::max(Known.countMinLeadingZeros() +
452                              Known2.countMinLeadingZeros(),
453                              BitWidth) - BitWidth;
454   LeadZ = std::min(LeadZ, BitWidth);
455 
456   // The result of the bottom bits of an integer multiply can be
457   // inferred by looking at the bottom bits of both operands and
458   // multiplying them together.
459   // We can infer at least the minimum number of known trailing bits
460   // of both operands. Depending on number of trailing zeros, we can
461   // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
462   // a and b are divisible by m and n respectively.
463   // We then calculate how many of those bits are inferrable and set
464   // the output. For example, the i8 mul:
465   //  a = XXXX1100 (12)
466   //  b = XXXX1110 (14)
467   // We know the bottom 3 bits are zero since the first can be divided by
468   // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
469   // Applying the multiplication to the trimmed arguments gets:
470   //    XX11 (3)
471   //    X111 (7)
472   // -------
473   //    XX11
474   //   XX11
475   //  XX11
476   // XX11
477   // -------
478   // XXXXX01
479   // Which allows us to infer the 2 LSBs. Since we're multiplying the result
480   // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
481   // The proof for this can be described as:
482   // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
483   //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
484   //                    umin(countTrailingZeros(C2), C6) +
485   //                    umin(C5 - umin(countTrailingZeros(C1), C5),
486   //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
487   // %aa = shl i8 %a, C5
488   // %bb = shl i8 %b, C6
489   // %aaa = or i8 %aa, C1
490   // %bbb = or i8 %bb, C2
491   // %mul = mul i8 %aaa, %bbb
492   // %mask = and i8 %mul, C7
493   //   =>
494   // %mask = i8 ((C1*C2)&C7)
495   // Where C5, C6 describe the known bits of %a, %b
496   // C1, C2 describe the known bottom bits of %a, %b.
497   // C7 describes the mask of the known bits of the result.
498   APInt Bottom0 = Known.One;
499   APInt Bottom1 = Known2.One;
500 
501   // How many times we'd be able to divide each argument by 2 (shr by 1).
502   // This gives us the number of trailing zeros on the multiplication result.
503   unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
504   unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
505   unsigned TrailZero0 = Known.countMinTrailingZeros();
506   unsigned TrailZero1 = Known2.countMinTrailingZeros();
507   unsigned TrailZ = TrailZero0 + TrailZero1;
508 
509   // Figure out the fewest known-bits operand.
510   unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
511                                       TrailBitsKnown1 - TrailZero1);
512   unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);
513 
514   APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
515                       Bottom1.getLoBits(TrailBitsKnown1);
516 
517   Known.resetAll();
518   Known.Zero.setHighBits(LeadZ);
519   Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
520   Known.One |= BottomKnown.getLoBits(ResultBitsKnown);
521 
522   // Only make use of no-wrap flags if we failed to compute the sign bit
523   // directly.  This matters if the multiplication always overflows, in
524   // which case we prefer to follow the result of the direct computation,
525   // though as the program is invoking undefined behaviour we can choose
526   // whatever we like here.
527   if (isKnownNonNegative && !Known.isNegative())
528     Known.makeNonNegative();
529   else if (isKnownNegative && !Known.isNonNegative())
530     Known.makeNegative();
531 }
532 
533 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
534                                              KnownBits &Known) {
535   unsigned BitWidth = Known.getBitWidth();
536   unsigned NumRanges = Ranges.getNumOperands() / 2;
537   assert(NumRanges >= 1);
538 
539   Known.Zero.setAllBits();
540   Known.One.setAllBits();
541 
542   for (unsigned i = 0; i < NumRanges; ++i) {
543     ConstantInt *Lower =
544         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
545     ConstantInt *Upper =
546         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
547     ConstantRange Range(Lower->getValue(), Upper->getValue());
548 
549     // The first CommonPrefixBits of all values in Range are equal.
550     unsigned CommonPrefixBits =
551         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
552 
553     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
554     Known.One &= Range.getUnsignedMax() & Mask;
555     Known.Zero &= ~Range.getUnsignedMax() & Mask;
556   }
557 }
558 
559 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
560   SmallVector<const Value *, 16> WorkSet(1, I);
561   SmallPtrSet<const Value *, 32> Visited;
562   SmallPtrSet<const Value *, 16> EphValues;
563 
564   // The instruction defining an assumption's condition itself is always
565   // considered ephemeral to that assumption (even if it has other
566   // non-ephemeral users). See r246696's test case for an example.
567   if (is_contained(I->operands(), E))
568     return true;
569 
570   while (!WorkSet.empty()) {
571     const Value *V = WorkSet.pop_back_val();
572     if (!Visited.insert(V).second)
573       continue;
574 
575     // If all uses of this value are ephemeral, then so is this value.
576     if (llvm::all_of(V->users(), [&](const User *U) {
577                                    return EphValues.count(U);
578                                  })) {
579       if (V == E)
580         return true;
581 
582       if (V == I || isSafeToSpeculativelyExecute(V)) {
583        EphValues.insert(V);
584        if (const User *U = dyn_cast<User>(V))
585          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
586               J != JE; ++J)
587            WorkSet.push_back(*J);
588       }
589     }
590   }
591 
592   return false;
593 }
594 
595 // Is this an intrinsic that cannot be speculated but also cannot trap?
596 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
597   if (const CallInst *CI = dyn_cast<CallInst>(I))
598     if (Function *F = CI->getCalledFunction())
599       switch (F->getIntrinsicID()) {
600       default: break;
601       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
602       case Intrinsic::assume:
603       case Intrinsic::sideeffect:
604       case Intrinsic::dbg_declare:
605       case Intrinsic::dbg_value:
606       case Intrinsic::dbg_label:
607       case Intrinsic::invariant_start:
608       case Intrinsic::invariant_end:
609       case Intrinsic::lifetime_start:
610       case Intrinsic::lifetime_end:
611       case Intrinsic::objectsize:
612       case Intrinsic::ptr_annotation:
613       case Intrinsic::var_annotation:
614         return true;
615       }
616 
617   return false;
618 }
619 
620 bool llvm::isValidAssumeForContext(const Instruction *Inv,
621                                    const Instruction *CxtI,
622                                    const DominatorTree *DT) {
623   // There are two restrictions on the use of an assume:
624   //  1. The assume must dominate the context (or the control flow must
625   //     reach the assume whenever it reaches the context).
626   //  2. The context must not be in the assume's set of ephemeral values
627   //     (otherwise we will use the assume to prove that the condition
628   //     feeding the assume is trivially true, thus causing the removal of
629   //     the assume).
630 
631   if (Inv->getParent() == CxtI->getParent()) {
632     // If Inv and CtxI are in the same block, check if the assume (Inv) is first
633     // in the BB.
634     if (Inv->comesBefore(CxtI))
635       return true;
636 
637     // Don't let an assume affect itself - this would cause the problems
638     // `isEphemeralValueOf` is trying to prevent, and it would also make
639     // the loop below go out of bounds.
640     if (Inv == CxtI)
641       return false;
642 
643     // The context comes first, but they're both in the same block.
644     // Make sure there is nothing in between that might interrupt
645     // the control flow, not even CxtI itself.
646     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
647       if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
648         return false;
649 
650     return !isEphemeralValueOf(Inv, CxtI);
651   }
652 
653   // Inv and CxtI are in different blocks.
654   if (DT) {
655     if (DT->dominates(Inv, CxtI))
656       return true;
657   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
658     // We don't have a DT, but this trivially dominates.
659     return true;
660   }
661 
662   return false;
663 }
664 
665 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
666   // Use of assumptions is context-sensitive. If we don't have a context, we
667   // cannot use them!
668   if (!Q.AC || !Q.CxtI)
669     return false;
670 
671   // Note that the patterns below need to be kept in sync with the code
672   // in AssumptionCache::updateAffectedValues.
673 
674   auto CmpExcludesZero = [V](ICmpInst *Cmp) {
675     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
676 
677     Value *RHS;
678     CmpInst::Predicate Pred;
679     if (!match(Cmp, m_c_ICmp(Pred, m_V, m_Value(RHS))))
680       return false;
681     // assume(v u> y) -> assume(v != 0)
682     if (Pred == ICmpInst::ICMP_UGT)
683       return true;
684 
685     // assume(v != 0)
686     // We special-case this one to ensure that we handle `assume(v != null)`.
687     if (Pred == ICmpInst::ICMP_NE)
688       return match(RHS, m_Zero());
689 
690     // All other predicates - rely on generic ConstantRange handling.
691     ConstantInt *CI;
692     if (!match(RHS, m_ConstantInt(CI)))
693       return false;
694     ConstantRange RHSRange(CI->getValue());
695     ConstantRange TrueValues =
696         ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
697     return !TrueValues.contains(APInt::getNullValue(CI->getBitWidth()));
698   };
699 
700   if (Q.CxtI && V->getType()->isPointerTy()) {
701     SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
702     if (!NullPointerIsDefined(Q.CxtI->getFunction(),
703                               V->getType()->getPointerAddressSpace()))
704       AttrKinds.push_back(Attribute::Dereferenceable);
705 
706     if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
707       return true;
708   }
709 
710   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
711     if (!AssumeVH)
712       continue;
713     CallInst *I = cast<CallInst>(AssumeVH);
714     assert(I->getFunction() == Q.CxtI->getFunction() &&
715            "Got assumption for the wrong function!");
716     if (Q.isExcluded(I))
717       continue;
718 
719     // Warning: This loop can end up being somewhat performance sensitive.
720     // We're running this loop for once for each value queried resulting in a
721     // runtime of ~O(#assumes * #values).
722 
723     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
724            "must be an assume intrinsic");
725 
726     Value *Arg = I->getArgOperand(0);
727     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
728     if (!Cmp)
729       continue;
730 
731     if (CmpExcludesZero(Cmp) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
732       return true;
733   }
734 
735   return false;
736 }
737 
738 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
739                                        unsigned Depth, const Query &Q) {
740   // Use of assumptions is context-sensitive. If we don't have a context, we
741   // cannot use them!
742   if (!Q.AC || !Q.CxtI)
743     return;
744 
745   unsigned BitWidth = Known.getBitWidth();
746 
747   // Note that the patterns below need to be kept in sync with the code
748   // in AssumptionCache::updateAffectedValues.
749 
750   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
751     if (!AssumeVH)
752       continue;
753     CallInst *I = cast<CallInst>(AssumeVH);
754     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
755            "Got assumption for the wrong function!");
756     if (Q.isExcluded(I))
757       continue;
758 
759     // Warning: This loop can end up being somewhat performance sensitive.
760     // We're running this loop for once for each value queried resulting in a
761     // runtime of ~O(#assumes * #values).
762 
763     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
764            "must be an assume intrinsic");
765 
766     Value *Arg = I->getArgOperand(0);
767 
768     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
769       assert(BitWidth == 1 && "assume operand is not i1?");
770       Known.setAllOnes();
771       return;
772     }
773     if (match(Arg, m_Not(m_Specific(V))) &&
774         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
775       assert(BitWidth == 1 && "assume operand is not i1?");
776       Known.setAllZero();
777       return;
778     }
779 
780     // The remaining tests are all recursive, so bail out if we hit the limit.
781     if (Depth == MaxDepth)
782       continue;
783 
784     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
785     if (!Cmp)
786       continue;
787 
788     // Note that ptrtoint may change the bitwidth.
789     Value *A, *B;
790     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
791 
792     CmpInst::Predicate Pred;
793     uint64_t C;
794     switch (Cmp->getPredicate()) {
795     default:
796       break;
797     case ICmpInst::ICMP_EQ:
798       // assume(v = a)
799       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
800           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
801         KnownBits RHSKnown =
802             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
803         Known.Zero |= RHSKnown.Zero;
804         Known.One  |= RHSKnown.One;
805       // assume(v & b = a)
806       } else if (match(Cmp,
807                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
808                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
809         KnownBits RHSKnown =
810             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
811         KnownBits MaskKnown =
812             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
813 
814         // For those bits in the mask that are known to be one, we can propagate
815         // known bits from the RHS to V.
816         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
817         Known.One  |= RHSKnown.One  & MaskKnown.One;
818       // assume(~(v & b) = a)
819       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
820                                      m_Value(A))) &&
821                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
822         KnownBits RHSKnown =
823             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
824         KnownBits MaskKnown =
825             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
826 
827         // For those bits in the mask that are known to be one, we can propagate
828         // inverted known bits from the RHS to V.
829         Known.Zero |= RHSKnown.One  & MaskKnown.One;
830         Known.One  |= RHSKnown.Zero & MaskKnown.One;
831       // assume(v | b = a)
832       } else if (match(Cmp,
833                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
834                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
835         KnownBits RHSKnown =
836             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
837         KnownBits BKnown =
838             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
839 
840         // For those bits in B that are known to be zero, we can propagate known
841         // bits from the RHS to V.
842         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
843         Known.One  |= RHSKnown.One  & BKnown.Zero;
844       // assume(~(v | b) = a)
845       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
846                                      m_Value(A))) &&
847                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
848         KnownBits RHSKnown =
849             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
850         KnownBits BKnown =
851             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
852 
853         // For those bits in B that are known to be zero, we can propagate
854         // inverted known bits from the RHS to V.
855         Known.Zero |= RHSKnown.One  & BKnown.Zero;
856         Known.One  |= RHSKnown.Zero & BKnown.Zero;
857       // assume(v ^ b = a)
858       } else if (match(Cmp,
859                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
860                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
861         KnownBits RHSKnown =
862             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
863         KnownBits BKnown =
864             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
865 
866         // For those bits in B that are known to be zero, we can propagate known
867         // bits from the RHS to V. For those bits in B that are known to be one,
868         // we can propagate inverted known bits from the RHS to V.
869         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
870         Known.One  |= RHSKnown.One  & BKnown.Zero;
871         Known.Zero |= RHSKnown.One  & BKnown.One;
872         Known.One  |= RHSKnown.Zero & BKnown.One;
873       // assume(~(v ^ b) = a)
874       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
875                                      m_Value(A))) &&
876                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
877         KnownBits RHSKnown =
878             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
879         KnownBits BKnown =
880             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
881 
882         // For those bits in B that are known to be zero, we can propagate
883         // inverted known bits from the RHS to V. For those bits in B that are
884         // known to be one, we can propagate known bits from the RHS to V.
885         Known.Zero |= RHSKnown.One  & BKnown.Zero;
886         Known.One  |= RHSKnown.Zero & BKnown.Zero;
887         Known.Zero |= RHSKnown.Zero & BKnown.One;
888         Known.One  |= RHSKnown.One  & BKnown.One;
889       // assume(v << c = a)
890       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
891                                      m_Value(A))) &&
892                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
893         KnownBits RHSKnown =
894             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
895 
896         // For those bits in RHS that are known, we can propagate them to known
897         // bits in V shifted to the right by C.
898         RHSKnown.Zero.lshrInPlace(C);
899         Known.Zero |= RHSKnown.Zero;
900         RHSKnown.One.lshrInPlace(C);
901         Known.One  |= RHSKnown.One;
902       // assume(~(v << c) = a)
903       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
904                                      m_Value(A))) &&
905                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
906         KnownBits RHSKnown =
907             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
908         // For those bits in RHS that are known, we can propagate them inverted
909         // to known bits in V shifted to the right by C.
910         RHSKnown.One.lshrInPlace(C);
911         Known.Zero |= RHSKnown.One;
912         RHSKnown.Zero.lshrInPlace(C);
913         Known.One  |= RHSKnown.Zero;
914       // assume(v >> c = a)
915       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
916                                      m_Value(A))) &&
917                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
918         KnownBits RHSKnown =
919             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
920         // For those bits in RHS that are known, we can propagate them to known
921         // bits in V shifted to the right by C.
922         Known.Zero |= RHSKnown.Zero << C;
923         Known.One  |= RHSKnown.One  << C;
924       // assume(~(v >> c) = a)
925       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
926                                      m_Value(A))) &&
927                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
928         KnownBits RHSKnown =
929             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
930         // For those bits in RHS that are known, we can propagate them inverted
931         // to known bits in V shifted to the right by C.
932         Known.Zero |= RHSKnown.One  << C;
933         Known.One  |= RHSKnown.Zero << C;
934       }
935       break;
936     case ICmpInst::ICMP_SGE:
937       // assume(v >=_s c) where c is non-negative
938       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
939           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
940         KnownBits RHSKnown =
941             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
942 
943         if (RHSKnown.isNonNegative()) {
944           // We know that the sign bit is zero.
945           Known.makeNonNegative();
946         }
947       }
948       break;
949     case ICmpInst::ICMP_SGT:
950       // assume(v >_s c) where c is at least -1.
951       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
952           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
953         KnownBits RHSKnown =
954             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
955 
956         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
957           // We know that the sign bit is zero.
958           Known.makeNonNegative();
959         }
960       }
961       break;
962     case ICmpInst::ICMP_SLE:
963       // assume(v <=_s c) where c is negative
964       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
965           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
966         KnownBits RHSKnown =
967             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
968 
969         if (RHSKnown.isNegative()) {
970           // We know that the sign bit is one.
971           Known.makeNegative();
972         }
973       }
974       break;
975     case ICmpInst::ICMP_SLT:
976       // assume(v <_s c) where c is non-positive
977       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
978           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
979         KnownBits RHSKnown =
980             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
981 
982         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
983           // We know that the sign bit is one.
984           Known.makeNegative();
985         }
986       }
987       break;
988     case ICmpInst::ICMP_ULE:
989       // assume(v <=_u c)
990       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
991           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
992         KnownBits RHSKnown =
993             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
994 
995         // Whatever high bits in c are zero are known to be zero.
996         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
997       }
998       break;
999     case ICmpInst::ICMP_ULT:
1000       // assume(v <_u c)
1001       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
1002           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
1003         KnownBits RHSKnown =
1004             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
1005 
1006         // If the RHS is known zero, then this assumption must be wrong (nothing
1007         // is unsigned less than zero). Signal a conflict and get out of here.
1008         if (RHSKnown.isZero()) {
1009           Known.Zero.setAllBits();
1010           Known.One.setAllBits();
1011           break;
1012         }
1013 
1014         // Whatever high bits in c are zero are known to be zero (if c is a power
1015         // of 2, then one more).
1016         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
1017           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
1018         else
1019           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
1020       }
1021       break;
1022     }
1023   }
1024 
1025   // If assumptions conflict with each other or previous known bits, then we
1026   // have a logical fallacy. It's possible that the assumption is not reachable,
1027   // so this isn't a real bug. On the other hand, the program may have undefined
1028   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
1029   // clear out the known bits, try to warn the user, and hope for the best.
1030   if (Known.Zero.intersects(Known.One)) {
1031     Known.resetAll();
1032 
1033     if (Q.ORE)
1034       Q.ORE->emit([&]() {
1035         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
1036         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
1037                                           CxtI)
1038                << "Detected conflicting code assumptions. Program may "
1039                   "have undefined behavior, or compiler may have "
1040                   "internal error.";
1041       });
1042   }
1043 }
1044 
1045 /// Compute known bits from a shift operator, including those with a
1046 /// non-constant shift amount. Known is the output of this function. Known2 is a
1047 /// pre-allocated temporary with the same bit width as Known. KZF and KOF are
1048 /// operator-specific functions that, given the known-zero or known-one bits
1049 /// respectively, and a shift amount, compute the implied known-zero or
1050 /// known-one bits of the shift operator's result respectively for that shift
1051 /// amount. The results from calling KZF and KOF are conservatively combined for
1052 /// all permitted shift amounts.
1053 static void computeKnownBitsFromShiftOperator(
1054     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
1055     KnownBits &Known2, unsigned Depth, const Query &Q,
1056     function_ref<APInt(const APInt &, unsigned)> KZF,
1057     function_ref<APInt(const APInt &, unsigned)> KOF) {
1058   unsigned BitWidth = Known.getBitWidth();
1059 
1060   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1061   if (Known.isConstant()) {
1062     unsigned ShiftAmt = Known.getConstant().getLimitedValue(BitWidth - 1);
1063 
1064     computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1065     Known.Zero = KZF(Known.Zero, ShiftAmt);
1066     Known.One  = KOF(Known.One, ShiftAmt);
1067     // If the known bits conflict, this must be an overflowing left shift, so
1068     // the shift result is poison. We can return anything we want. Choose 0 for
1069     // the best folding opportunity.
1070     if (Known.hasConflict())
1071       Known.setAllZero();
1072 
1073     return;
1074   }
1075 
1076   // If the shift amount could be greater than or equal to the bit-width of the
1077   // LHS, the value could be poison, but bail out because the check below is
1078   // expensive.
1079   // TODO: Should we just carry on?
1080   if (Known.getMaxValue().uge(BitWidth)) {
1081     Known.resetAll();
1082     return;
1083   }
1084 
1085   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
1086   // BitWidth > 64 and any upper bits are known, we'll end up returning the
1087   // limit value (which implies all bits are known).
1088   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
1089   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
1090 
1091   // It would be more-clearly correct to use the two temporaries for this
1092   // calculation. Reusing the APInts here to prevent unnecessary allocations.
1093   Known.resetAll();
1094 
1095   // If we know the shifter operand is nonzero, we can sometimes infer more
1096   // known bits. However this is expensive to compute, so be lazy about it and
1097   // only compute it when absolutely necessary.
1098   Optional<bool> ShifterOperandIsNonZero;
1099 
1100   // Early exit if we can't constrain any well-defined shift amount.
1101   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1102       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1103     ShifterOperandIsNonZero =
1104         isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1105     if (!*ShifterOperandIsNonZero)
1106       return;
1107   }
1108 
1109   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1110 
1111   Known.Zero.setAllBits();
1112   Known.One.setAllBits();
1113   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1114     // Combine the shifted known input bits only for those shift amounts
1115     // compatible with its known constraints.
1116     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1117       continue;
1118     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1119       continue;
1120     // If we know the shifter is nonzero, we may be able to infer more known
1121     // bits. This check is sunk down as far as possible to avoid the expensive
1122     // call to isKnownNonZero if the cheaper checks above fail.
1123     if (ShiftAmt == 0) {
1124       if (!ShifterOperandIsNonZero.hasValue())
1125         ShifterOperandIsNonZero =
1126             isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1127       if (*ShifterOperandIsNonZero)
1128         continue;
1129     }
1130 
1131     Known.Zero &= KZF(Known2.Zero, ShiftAmt);
1132     Known.One  &= KOF(Known2.One, ShiftAmt);
1133   }
1134 
1135   // If the known bits conflict, the result is poison. Return a 0 and hope the
1136   // caller can further optimize that.
1137   if (Known.hasConflict())
1138     Known.setAllZero();
1139 }
1140 
1141 static void computeKnownBitsFromOperator(const Operator *I,
1142                                          const APInt &DemandedElts,
1143                                          KnownBits &Known, unsigned Depth,
1144                                          const Query &Q) {
1145   unsigned BitWidth = Known.getBitWidth();
1146 
1147   KnownBits Known2(BitWidth);
1148   switch (I->getOpcode()) {
1149   default: break;
1150   case Instruction::Load:
1151     if (MDNode *MD =
1152             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1153       computeKnownBitsFromRangeMetadata(*MD, Known);
1154     break;
1155   case Instruction::And: {
1156     // If either the LHS or the RHS are Zero, the result is zero.
1157     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1158     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1159 
1160     Known &= Known2;
1161 
1162     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1163     // here we handle the more general case of adding any odd number by
1164     // matching the form add(x, add(x, y)) where y is odd.
1165     // TODO: This could be generalized to clearing any bit set in y where the
1166     // following bit is known to be unset in y.
1167     Value *X = nullptr, *Y = nullptr;
1168     if (!Known.Zero[0] && !Known.One[0] &&
1169         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1170       Known2.resetAll();
1171       computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1172       if (Known2.countMinTrailingOnes() > 0)
1173         Known.Zero.setBit(0);
1174     }
1175     break;
1176   }
1177   case Instruction::Or:
1178     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1179     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1180 
1181     Known |= Known2;
1182     break;
1183   case Instruction::Xor:
1184     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1185     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1186 
1187     Known ^= Known2;
1188     break;
1189   case Instruction::Mul: {
1190     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1191     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1192                         Known, Known2, Depth, Q);
1193     break;
1194   }
1195   case Instruction::UDiv: {
1196     // For the purposes of computing leading zeros we can conservatively
1197     // treat a udiv as a logical right shift by the power of 2 known to
1198     // be less than the denominator.
1199     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1200     unsigned LeadZ = Known2.countMinLeadingZeros();
1201 
1202     Known2.resetAll();
1203     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1204     unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
1205     if (RHSMaxLeadingZeros != BitWidth)
1206       LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
1207 
1208     Known.Zero.setHighBits(LeadZ);
1209     break;
1210   }
1211   case Instruction::Select: {
1212     const Value *LHS = nullptr, *RHS = nullptr;
1213     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1214     if (SelectPatternResult::isMinOrMax(SPF)) {
1215       computeKnownBits(RHS, Known, Depth + 1, Q);
1216       computeKnownBits(LHS, Known2, Depth + 1, Q);
1217     } else {
1218       computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1219       computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1220     }
1221 
1222     unsigned MaxHighOnes = 0;
1223     unsigned MaxHighZeros = 0;
1224     if (SPF == SPF_SMAX) {
1225       // If both sides are negative, the result is negative.
1226       if (Known.isNegative() && Known2.isNegative())
1227         // We can derive a lower bound on the result by taking the max of the
1228         // leading one bits.
1229         MaxHighOnes =
1230             std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1231       // If either side is non-negative, the result is non-negative.
1232       else if (Known.isNonNegative() || Known2.isNonNegative())
1233         MaxHighZeros = 1;
1234     } else if (SPF == SPF_SMIN) {
1235       // If both sides are non-negative, the result is non-negative.
1236       if (Known.isNonNegative() && Known2.isNonNegative())
1237         // We can derive an upper bound on the result by taking the max of the
1238         // leading zero bits.
1239         MaxHighZeros = std::max(Known.countMinLeadingZeros(),
1240                                 Known2.countMinLeadingZeros());
1241       // If either side is negative, the result is negative.
1242       else if (Known.isNegative() || Known2.isNegative())
1243         MaxHighOnes = 1;
1244     } else if (SPF == SPF_UMAX) {
1245       // We can derive a lower bound on the result by taking the max of the
1246       // leading one bits.
1247       MaxHighOnes =
1248           std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
1249     } else if (SPF == SPF_UMIN) {
1250       // We can derive an upper bound on the result by taking the max of the
1251       // leading zero bits.
1252       MaxHighZeros =
1253           std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1254     } else if (SPF == SPF_ABS) {
1255       // RHS from matchSelectPattern returns the negation part of abs pattern.
1256       // If the negate has an NSW flag we can assume the sign bit of the result
1257       // will be 0 because that makes abs(INT_MIN) undefined.
1258       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1259           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1260         MaxHighZeros = 1;
1261     }
1262 
1263     // Only known if known in both the LHS and RHS.
1264     Known.One &= Known2.One;
1265     Known.Zero &= Known2.Zero;
1266     if (MaxHighOnes > 0)
1267       Known.One.setHighBits(MaxHighOnes);
1268     if (MaxHighZeros > 0)
1269       Known.Zero.setHighBits(MaxHighZeros);
1270     break;
1271   }
1272   case Instruction::FPTrunc:
1273   case Instruction::FPExt:
1274   case Instruction::FPToUI:
1275   case Instruction::FPToSI:
1276   case Instruction::SIToFP:
1277   case Instruction::UIToFP:
1278     break; // Can't work with floating point.
1279   case Instruction::PtrToInt:
1280   case Instruction::IntToPtr:
1281     // Fall through and handle them the same as zext/trunc.
1282     LLVM_FALLTHROUGH;
1283   case Instruction::ZExt:
1284   case Instruction::Trunc: {
1285     Type *SrcTy = I->getOperand(0)->getType();
1286 
1287     unsigned SrcBitWidth;
1288     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1289     // which fall through here.
1290     Type *ScalarTy = SrcTy->getScalarType();
1291     SrcBitWidth = ScalarTy->isPointerTy() ?
1292       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1293       Q.DL.getTypeSizeInBits(ScalarTy);
1294 
1295     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1296     Known = Known.anyextOrTrunc(SrcBitWidth);
1297     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1298     Known = Known.zextOrTrunc(BitWidth);
1299     break;
1300   }
1301   case Instruction::BitCast: {
1302     Type *SrcTy = I->getOperand(0)->getType();
1303     if (SrcTy->isIntOrPtrTy() &&
1304         // TODO: For now, not handling conversions like:
1305         // (bitcast i64 %x to <2 x i32>)
1306         !I->getType()->isVectorTy()) {
1307       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1308       break;
1309     }
1310     break;
1311   }
1312   case Instruction::SExt: {
1313     // Compute the bits in the result that are not present in the input.
1314     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1315 
1316     Known = Known.trunc(SrcBitWidth);
1317     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1318     // If the sign bit of the input is known set or clear, then we know the
1319     // top bits of the result.
1320     Known = Known.sext(BitWidth);
1321     break;
1322   }
1323   case Instruction::Shl: {
1324     // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
1325     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1326     auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
1327       APInt KZResult = KnownZero << ShiftAmt;
1328       KZResult.setLowBits(ShiftAmt); // Low bits known 0.
1329       // If this shift has "nsw" keyword, then the result is either a poison
1330       // value or has the same sign bit as the first operand.
1331       if (NSW && KnownZero.isSignBitSet())
1332         KZResult.setSignBit();
1333       return KZResult;
1334     };
1335 
1336     auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
1337       APInt KOResult = KnownOne << ShiftAmt;
1338       if (NSW && KnownOne.isSignBitSet())
1339         KOResult.setSignBit();
1340       return KOResult;
1341     };
1342 
1343     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1344                                       KZF, KOF);
1345     break;
1346   }
1347   case Instruction::LShr: {
1348     // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1349     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1350       APInt KZResult = KnownZero.lshr(ShiftAmt);
1351       // High bits known zero.
1352       KZResult.setHighBits(ShiftAmt);
1353       return KZResult;
1354     };
1355 
1356     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1357       return KnownOne.lshr(ShiftAmt);
1358     };
1359 
1360     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1361                                       KZF, KOF);
1362     break;
1363   }
1364   case Instruction::AShr: {
1365     // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
1366     auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
1367       return KnownZero.ashr(ShiftAmt);
1368     };
1369 
1370     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
1371       return KnownOne.ashr(ShiftAmt);
1372     };
1373 
1374     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1375                                       KZF, KOF);
1376     break;
1377   }
1378   case Instruction::Sub: {
1379     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1380     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1381                            DemandedElts, Known, Known2, Depth, Q);
1382     break;
1383   }
1384   case Instruction::Add: {
1385     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1386     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1387                            DemandedElts, Known, Known2, Depth, Q);
1388     break;
1389   }
1390   case Instruction::SRem:
1391     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1392       APInt RA = Rem->getValue().abs();
1393       if (RA.isPowerOf2()) {
1394         APInt LowBits = RA - 1;
1395         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1396 
1397         // The low bits of the first operand are unchanged by the srem.
1398         Known.Zero = Known2.Zero & LowBits;
1399         Known.One = Known2.One & LowBits;
1400 
1401         // If the first operand is non-negative or has all low bits zero, then
1402         // the upper bits are all zero.
1403         if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
1404           Known.Zero |= ~LowBits;
1405 
1406         // If the first operand is negative and not all low bits are zero, then
1407         // the upper bits are all one.
1408         if (Known2.isNegative() && LowBits.intersects(Known2.One))
1409           Known.One |= ~LowBits;
1410 
1411         assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1412         break;
1413       }
1414     }
1415 
1416     // The sign bit is the LHS's sign bit, except when the result of the
1417     // remainder is zero.
1418     computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1419     // If it's known zero, our sign bit is also zero.
1420     if (Known2.isNonNegative())
1421       Known.makeNonNegative();
1422 
1423     break;
1424   case Instruction::URem: {
1425     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1426       const APInt &RA = Rem->getValue();
1427       if (RA.isPowerOf2()) {
1428         APInt LowBits = (RA - 1);
1429         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1430         Known.Zero |= ~LowBits;
1431         Known.One &= LowBits;
1432         break;
1433       }
1434     }
1435 
1436     // Since the result is less than or equal to either operand, any leading
1437     // zero bits in either operand must also exist in the result.
1438     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1439     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1440 
1441     unsigned Leaders =
1442         std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
1443     Known.resetAll();
1444     Known.Zero.setHighBits(Leaders);
1445     break;
1446   }
1447   case Instruction::Alloca:
1448     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1449     break;
1450   case Instruction::GetElementPtr: {
1451     // Analyze all of the subscripts of this getelementptr instruction
1452     // to determine if we can prove known low zero bits.
1453     KnownBits LocalKnown(BitWidth);
1454     computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
1455     unsigned TrailZ = LocalKnown.countMinTrailingZeros();
1456 
1457     gep_type_iterator GTI = gep_type_begin(I);
1458     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1459       // TrailZ can only become smaller, short-circuit if we hit zero.
1460       if (TrailZ == 0)
1461         break;
1462 
1463       Value *Index = I->getOperand(i);
1464       if (StructType *STy = GTI.getStructTypeOrNull()) {
1465         // Handle struct member offset arithmetic.
1466 
1467         // Handle case when index is vector zeroinitializer
1468         Constant *CIndex = cast<Constant>(Index);
1469         if (CIndex->isZeroValue())
1470           continue;
1471 
1472         if (CIndex->getType()->isVectorTy())
1473           Index = CIndex->getSplatValue();
1474 
1475         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1476         const StructLayout *SL = Q.DL.getStructLayout(STy);
1477         uint64_t Offset = SL->getElementOffset(Idx);
1478         TrailZ = std::min<unsigned>(TrailZ,
1479                                     countTrailingZeros(Offset));
1480       } else {
1481         // Handle array index arithmetic.
1482         Type *IndexedTy = GTI.getIndexedType();
1483         if (!IndexedTy->isSized()) {
1484           TrailZ = 0;
1485           break;
1486         }
1487         unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1488         uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy).getKnownMinSize();
1489         LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
1490         computeKnownBits(Index, LocalKnown, Depth + 1, Q);
1491         TrailZ = std::min(TrailZ,
1492                           unsigned(countTrailingZeros(TypeSize) +
1493                                    LocalKnown.countMinTrailingZeros()));
1494       }
1495     }
1496 
1497     Known.Zero.setLowBits(TrailZ);
1498     break;
1499   }
1500   case Instruction::PHI: {
1501     const PHINode *P = cast<PHINode>(I);
1502     // Handle the case of a simple two-predecessor recurrence PHI.
1503     // There's a lot more that could theoretically be done here, but
1504     // this is sufficient to catch some interesting cases.
1505     if (P->getNumIncomingValues() == 2) {
1506       for (unsigned i = 0; i != 2; ++i) {
1507         Value *L = P->getIncomingValue(i);
1508         Value *R = P->getIncomingValue(!i);
1509         Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
1510         Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
1511         Operator *LU = dyn_cast<Operator>(L);
1512         if (!LU)
1513           continue;
1514         unsigned Opcode = LU->getOpcode();
1515         // Check for operations that have the property that if
1516         // both their operands have low zero bits, the result
1517         // will have low zero bits.
1518         if (Opcode == Instruction::Add ||
1519             Opcode == Instruction::Sub ||
1520             Opcode == Instruction::And ||
1521             Opcode == Instruction::Or ||
1522             Opcode == Instruction::Mul) {
1523           Value *LL = LU->getOperand(0);
1524           Value *LR = LU->getOperand(1);
1525           // Find a recurrence.
1526           if (LL == I)
1527             L = LR;
1528           else if (LR == I)
1529             L = LL;
1530           else
1531             continue; // Check for recurrence with L and R flipped.
1532 
1533           // Change the context instruction to the "edge" that flows into the
1534           // phi. This is important because that is where the value is actually
1535           // "evaluated" even though it is used later somewhere else. (see also
1536           // D69571).
1537           Query RecQ = Q;
1538 
1539           // Ok, we have a PHI of the form L op= R. Check for low
1540           // zero bits.
1541           RecQ.CxtI = RInst;
1542           computeKnownBits(R, Known2, Depth + 1, RecQ);
1543 
1544           // We need to take the minimum number of known bits
1545           KnownBits Known3(BitWidth);
1546           RecQ.CxtI = LInst;
1547           computeKnownBits(L, Known3, Depth + 1, RecQ);
1548 
1549           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1550                                          Known3.countMinTrailingZeros()));
1551 
1552           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1553           if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1554             // If initial value of recurrence is nonnegative, and we are adding
1555             // a nonnegative number with nsw, the result can only be nonnegative
1556             // or poison value regardless of the number of times we execute the
1557             // add in phi recurrence. If initial value is negative and we are
1558             // adding a negative number with nsw, the result can only be
1559             // negative or poison value. Similar arguments apply to sub and mul.
1560             //
1561             // (add non-negative, non-negative) --> non-negative
1562             // (add negative, negative) --> negative
1563             if (Opcode == Instruction::Add) {
1564               if (Known2.isNonNegative() && Known3.isNonNegative())
1565                 Known.makeNonNegative();
1566               else if (Known2.isNegative() && Known3.isNegative())
1567                 Known.makeNegative();
1568             }
1569 
1570             // (sub nsw non-negative, negative) --> non-negative
1571             // (sub nsw negative, non-negative) --> negative
1572             else if (Opcode == Instruction::Sub && LL == I) {
1573               if (Known2.isNonNegative() && Known3.isNegative())
1574                 Known.makeNonNegative();
1575               else if (Known2.isNegative() && Known3.isNonNegative())
1576                 Known.makeNegative();
1577             }
1578 
1579             // (mul nsw non-negative, non-negative) --> non-negative
1580             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1581                      Known3.isNonNegative())
1582               Known.makeNonNegative();
1583           }
1584 
1585           break;
1586         }
1587       }
1588     }
1589 
1590     // Unreachable blocks may have zero-operand PHI nodes.
1591     if (P->getNumIncomingValues() == 0)
1592       break;
1593 
1594     // Otherwise take the unions of the known bit sets of the operands,
1595     // taking conservative care to avoid excessive recursion.
1596     if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1597       // Skip if every incoming value references to ourself.
1598       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1599         break;
1600 
1601       Known.Zero.setAllBits();
1602       Known.One.setAllBits();
1603       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1604         Value *IncValue = P->getIncomingValue(u);
1605         // Skip direct self references.
1606         if (IncValue == P) continue;
1607 
1608         // Change the context instruction to the "edge" that flows into the
1609         // phi. This is important because that is where the value is actually
1610         // "evaluated" even though it is used later somewhere else. (see also
1611         // D69571).
1612         Query RecQ = Q;
1613         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1614 
1615         Known2 = KnownBits(BitWidth);
1616         // Recurse, but cap the recursion to one level, because we don't
1617         // want to waste time spinning around in loops.
1618         computeKnownBits(IncValue, Known2, MaxDepth - 1, RecQ);
1619         Known.Zero &= Known2.Zero;
1620         Known.One &= Known2.One;
1621         // If all bits have been ruled out, there's no need to check
1622         // more operands.
1623         if (!Known.Zero && !Known.One)
1624           break;
1625       }
1626     }
1627     break;
1628   }
1629   case Instruction::Call:
1630   case Instruction::Invoke:
1631     // If range metadata is attached to this call, set known bits from that,
1632     // and then intersect with known bits based on other properties of the
1633     // function.
1634     if (MDNode *MD =
1635             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1636       computeKnownBitsFromRangeMetadata(*MD, Known);
1637     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1638       computeKnownBits(RV, Known2, Depth + 1, Q);
1639       Known.Zero |= Known2.Zero;
1640       Known.One |= Known2.One;
1641     }
1642     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1643       switch (II->getIntrinsicID()) {
1644       default: break;
1645       case Intrinsic::abs:
1646         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1647         // Otherwise, if this call is undefined for INT_MIN, the result is
1648         // positive.
1649         if (match(II->getArgOperand(1), m_One()))
1650           Known.Zero.setSignBit();
1651         // Absolute value preserves trailing zero count.
1652         Known.Zero.setLowBits(Known2.Zero.countTrailingOnes());
1653         // FIXME: Handle known negative/non-negative input?
1654         // FIXME: Calculate the negated Known bits and combine them?
1655         break;
1656       case Intrinsic::bitreverse:
1657         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1658         Known.Zero |= Known2.Zero.reverseBits();
1659         Known.One |= Known2.One.reverseBits();
1660         break;
1661       case Intrinsic::bswap:
1662         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1663         Known.Zero |= Known2.Zero.byteSwap();
1664         Known.One |= Known2.One.byteSwap();
1665         break;
1666       case Intrinsic::ctlz: {
1667         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1668         // If we have a known 1, its position is our upper bound.
1669         unsigned PossibleLZ = Known2.One.countLeadingZeros();
1670         // If this call is undefined for 0, the result will be less than 2^n.
1671         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1672           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1673         unsigned LowBits = Log2_32(PossibleLZ)+1;
1674         Known.Zero.setBitsFrom(LowBits);
1675         break;
1676       }
1677       case Intrinsic::cttz: {
1678         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1679         // If we have a known 1, its position is our upper bound.
1680         unsigned PossibleTZ = Known2.One.countTrailingZeros();
1681         // If this call is undefined for 0, the result will be less than 2^n.
1682         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1683           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1684         unsigned LowBits = Log2_32(PossibleTZ)+1;
1685         Known.Zero.setBitsFrom(LowBits);
1686         break;
1687       }
1688       case Intrinsic::ctpop: {
1689         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1690         // We can bound the space the count needs.  Also, bits known to be zero
1691         // can't contribute to the population.
1692         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1693         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1694         Known.Zero.setBitsFrom(LowBits);
1695         // TODO: we could bound KnownOne using the lower bound on the number
1696         // of bits which might be set provided by popcnt KnownOne2.
1697         break;
1698       }
1699       case Intrinsic::fshr:
1700       case Intrinsic::fshl: {
1701         const APInt *SA;
1702         if (!match(I->getOperand(2), m_APInt(SA)))
1703           break;
1704 
1705         // Normalize to funnel shift left.
1706         uint64_t ShiftAmt = SA->urem(BitWidth);
1707         if (II->getIntrinsicID() == Intrinsic::fshr)
1708           ShiftAmt = BitWidth - ShiftAmt;
1709 
1710         KnownBits Known3(BitWidth);
1711         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1712         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1713 
1714         Known.Zero =
1715             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1716         Known.One =
1717             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1718         break;
1719       }
1720       case Intrinsic::uadd_sat:
1721       case Intrinsic::usub_sat: {
1722         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1723         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1724         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1725 
1726         // Add: Leading ones of either operand are preserved.
1727         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1728         // as leading zeros in the result.
1729         unsigned LeadingKnown;
1730         if (IsAdd)
1731           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1732                                   Known2.countMinLeadingOnes());
1733         else
1734           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1735                                   Known2.countMinLeadingOnes());
1736 
1737         Known = KnownBits::computeForAddSub(
1738             IsAdd, /* NSW */ false, Known, Known2);
1739 
1740         // We select between the operation result and all-ones/zero
1741         // respectively, so we can preserve known ones/zeros.
1742         if (IsAdd) {
1743           Known.One.setHighBits(LeadingKnown);
1744           Known.Zero.clearAllBits();
1745         } else {
1746           Known.Zero.setHighBits(LeadingKnown);
1747           Known.One.clearAllBits();
1748         }
1749         break;
1750       }
1751       case Intrinsic::x86_sse42_crc32_64_64:
1752         Known.Zero.setBitsFrom(32);
1753         break;
1754       }
1755     }
1756     break;
1757   case Instruction::ShuffleVector: {
1758     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1759     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1760     if (!Shuf) {
1761       Known.resetAll();
1762       return;
1763     }
1764     // For undef elements, we don't know anything about the common state of
1765     // the shuffle result.
1766     APInt DemandedLHS, DemandedRHS;
1767     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1768       Known.resetAll();
1769       return;
1770     }
1771     Known.One.setAllBits();
1772     Known.Zero.setAllBits();
1773     if (!!DemandedLHS) {
1774       const Value *LHS = Shuf->getOperand(0);
1775       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1776       // If we don't know any bits, early out.
1777       if (Known.isUnknown())
1778         break;
1779     }
1780     if (!!DemandedRHS) {
1781       const Value *RHS = Shuf->getOperand(1);
1782       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1783       Known.One &= Known2.One;
1784       Known.Zero &= Known2.Zero;
1785     }
1786     break;
1787   }
1788   case Instruction::InsertElement: {
1789     const Value *Vec = I->getOperand(0);
1790     const Value *Elt = I->getOperand(1);
1791     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1792     // Early out if the index is non-constant or out-of-range.
1793     unsigned NumElts = DemandedElts.getBitWidth();
1794     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1795       Known.resetAll();
1796       return;
1797     }
1798     Known.One.setAllBits();
1799     Known.Zero.setAllBits();
1800     unsigned EltIdx = CIdx->getZExtValue();
1801     // Do we demand the inserted element?
1802     if (DemandedElts[EltIdx]) {
1803       computeKnownBits(Elt, Known, Depth + 1, Q);
1804       // If we don't know any bits, early out.
1805       if (Known.isUnknown())
1806         break;
1807     }
1808     // We don't need the base vector element that has been inserted.
1809     APInt DemandedVecElts = DemandedElts;
1810     DemandedVecElts.clearBit(EltIdx);
1811     if (!!DemandedVecElts) {
1812       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1813       Known.One &= Known2.One;
1814       Known.Zero &= Known2.Zero;
1815     }
1816     break;
1817   }
1818   case Instruction::ExtractElement: {
1819     // Look through extract element. If the index is non-constant or
1820     // out-of-range demand all elements, otherwise just the extracted element.
1821     const Value *Vec = I->getOperand(0);
1822     const Value *Idx = I->getOperand(1);
1823     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1824     if (isa<ScalableVectorType>(Vec->getType())) {
1825       // FIXME: there's probably *something* we can do with scalable vectors
1826       Known.resetAll();
1827       break;
1828     }
1829     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1830     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1831     if (CIdx && CIdx->getValue().ult(NumElts))
1832       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1833     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1834     break;
1835   }
1836   case Instruction::ExtractValue:
1837     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1838       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1839       if (EVI->getNumIndices() != 1) break;
1840       if (EVI->getIndices()[0] == 0) {
1841         switch (II->getIntrinsicID()) {
1842         default: break;
1843         case Intrinsic::uadd_with_overflow:
1844         case Intrinsic::sadd_with_overflow:
1845           computeKnownBitsAddSub(true, II->getArgOperand(0),
1846                                  II->getArgOperand(1), false, DemandedElts,
1847                                  Known, Known2, Depth, Q);
1848           break;
1849         case Intrinsic::usub_with_overflow:
1850         case Intrinsic::ssub_with_overflow:
1851           computeKnownBitsAddSub(false, II->getArgOperand(0),
1852                                  II->getArgOperand(1), false, DemandedElts,
1853                                  Known, Known2, Depth, Q);
1854           break;
1855         case Intrinsic::umul_with_overflow:
1856         case Intrinsic::smul_with_overflow:
1857           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1858                               DemandedElts, Known, Known2, Depth, Q);
1859           break;
1860         }
1861       }
1862     }
1863     break;
1864   }
1865 }
1866 
1867 /// Determine which bits of V are known to be either zero or one and return
1868 /// them.
1869 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1870                            unsigned Depth, const Query &Q) {
1871   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1872   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1873   return Known;
1874 }
1875 
1876 /// Determine which bits of V are known to be either zero or one and return
1877 /// them.
1878 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1879   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1880   computeKnownBits(V, Known, Depth, Q);
1881   return Known;
1882 }
1883 
1884 /// Determine which bits of V are known to be either zero or one and return
1885 /// them in the Known bit set.
1886 ///
1887 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1888 /// we cannot optimize based on the assumption that it is zero without changing
1889 /// it to be an explicit zero.  If we don't change it to zero, other code could
1890 /// optimized based on the contradictory assumption that it is non-zero.
1891 /// Because instcombine aggressively folds operations with undef args anyway,
1892 /// this won't lose us code quality.
1893 ///
1894 /// This function is defined on values with integer type, values with pointer
1895 /// type, and vectors of integers.  In the case
1896 /// where V is a vector, known zero, and known one values are the
1897 /// same width as the vector element, and the bit is set only if it is true
1898 /// for all of the demanded elements in the vector specified by DemandedElts.
1899 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1900                       KnownBits &Known, unsigned Depth, const Query &Q) {
1901   if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1902     // No demanded elts or V is a scalable vector, better to assume we don't
1903     // know anything.
1904     Known.resetAll();
1905     return;
1906   }
1907 
1908   assert(V && "No Value?");
1909   assert(Depth <= MaxDepth && "Limit Search Depth");
1910 
1911 #ifndef NDEBUG
1912   Type *Ty = V->getType();
1913   unsigned BitWidth = Known.getBitWidth();
1914 
1915   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1916          "Not integer or pointer type!");
1917 
1918   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1919     assert(
1920         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1921         "DemandedElt width should equal the fixed vector number of elements");
1922   } else {
1923     assert(DemandedElts == APInt(1, 1) &&
1924            "DemandedElt width should be 1 for scalars");
1925   }
1926 
1927   Type *ScalarTy = Ty->getScalarType();
1928   if (ScalarTy->isPointerTy()) {
1929     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1930            "V and Known should have same BitWidth");
1931   } else {
1932     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1933            "V and Known should have same BitWidth");
1934   }
1935 #endif
1936 
1937   const APInt *C;
1938   if (match(V, m_APInt(C))) {
1939     // We know all of the bits for a scalar constant or a splat vector constant!
1940     Known.One = *C;
1941     Known.Zero = ~Known.One;
1942     return;
1943   }
1944   // Null and aggregate-zero are all-zeros.
1945   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1946     Known.setAllZero();
1947     return;
1948   }
1949   // Handle a constant vector by taking the intersection of the known bits of
1950   // each element.
1951   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1952     // We know that CDV must be a vector of integers. Take the intersection of
1953     // each element.
1954     Known.Zero.setAllBits(); Known.One.setAllBits();
1955     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1956       if (!DemandedElts[i])
1957         continue;
1958       APInt Elt = CDV->getElementAsAPInt(i);
1959       Known.Zero &= ~Elt;
1960       Known.One &= Elt;
1961     }
1962     return;
1963   }
1964 
1965   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1966     // We know that CV must be a vector of integers. Take the intersection of
1967     // each element.
1968     Known.Zero.setAllBits(); Known.One.setAllBits();
1969     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1970       if (!DemandedElts[i])
1971         continue;
1972       Constant *Element = CV->getAggregateElement(i);
1973       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1974       if (!ElementCI) {
1975         Known.resetAll();
1976         return;
1977       }
1978       const APInt &Elt = ElementCI->getValue();
1979       Known.Zero &= ~Elt;
1980       Known.One &= Elt;
1981     }
1982     return;
1983   }
1984 
1985   // Start out not knowing anything.
1986   Known.resetAll();
1987 
1988   // We can't imply anything about undefs.
1989   if (isa<UndefValue>(V))
1990     return;
1991 
1992   // There's no point in looking through other users of ConstantData for
1993   // assumptions.  Confirm that we've handled them all.
1994   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1995 
1996   // Limit search depth.
1997   // All recursive calls that increase depth must come after this.
1998   if (Depth == MaxDepth)
1999     return;
2000 
2001   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2002   // the bits of its aliasee.
2003   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2004     if (!GA->isInterposable())
2005       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2006     return;
2007   }
2008 
2009   if (const Operator *I = dyn_cast<Operator>(V))
2010     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2011 
2012   // Aligned pointers have trailing zeros - refine Known.Zero set
2013   if (isa<PointerType>(V->getType())) {
2014     Align Alignment = V->getPointerAlignment(Q.DL);
2015     Known.Zero.setLowBits(countTrailingZeros(Alignment.value()));
2016   }
2017 
2018   // computeKnownBitsFromAssume strictly refines Known.
2019   // Therefore, we run them after computeKnownBitsFromOperator.
2020 
2021   // Check whether a nearby assume intrinsic can determine some known bits.
2022   computeKnownBitsFromAssume(V, Known, Depth, Q);
2023 
2024   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2025 }
2026 
2027 /// Return true if the given value is known to have exactly one
2028 /// bit set when defined. For vectors return true if every element is known to
2029 /// be a power of two when defined. Supports values with integer or pointer
2030 /// types and vectors of integers.
2031 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2032                             const Query &Q) {
2033   assert(Depth <= MaxDepth && "Limit Search Depth");
2034 
2035   // Attempt to match against constants.
2036   if (OrZero && match(V, m_Power2OrZero()))
2037       return true;
2038   if (match(V, m_Power2()))
2039       return true;
2040 
2041   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
2042   // it is shifted off the end then the result is undefined.
2043   if (match(V, m_Shl(m_One(), m_Value())))
2044     return true;
2045 
2046   // (signmask) >>l X is clearly a power of two if the one is not shifted off
2047   // the bottom.  If it is shifted off the bottom then the result is undefined.
2048   if (match(V, m_LShr(m_SignMask(), m_Value())))
2049     return true;
2050 
2051   // The remaining tests are all recursive, so bail out if we hit the limit.
2052   if (Depth++ == MaxDepth)
2053     return false;
2054 
2055   Value *X = nullptr, *Y = nullptr;
2056   // A shift left or a logical shift right of a power of two is a power of two
2057   // or zero.
2058   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2059                  match(V, m_LShr(m_Value(X), m_Value()))))
2060     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2061 
2062   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2063     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2064 
2065   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2066     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2067            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2068 
2069   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2070     // A power of two and'd with anything is a power of two or zero.
2071     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2072         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2073       return true;
2074     // X & (-X) is always a power of two or zero.
2075     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2076       return true;
2077     return false;
2078   }
2079 
2080   // Adding a power-of-two or zero to the same power-of-two or zero yields
2081   // either the original power-of-two, a larger power-of-two or zero.
2082   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2083     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2084     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2085         Q.IIQ.hasNoSignedWrap(VOBO)) {
2086       if (match(X, m_And(m_Specific(Y), m_Value())) ||
2087           match(X, m_And(m_Value(), m_Specific(Y))))
2088         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2089           return true;
2090       if (match(Y, m_And(m_Specific(X), m_Value())) ||
2091           match(Y, m_And(m_Value(), m_Specific(X))))
2092         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2093           return true;
2094 
2095       unsigned BitWidth = V->getType()->getScalarSizeInBits();
2096       KnownBits LHSBits(BitWidth);
2097       computeKnownBits(X, LHSBits, Depth, Q);
2098 
2099       KnownBits RHSBits(BitWidth);
2100       computeKnownBits(Y, RHSBits, Depth, Q);
2101       // If i8 V is a power of two or zero:
2102       //  ZeroBits: 1 1 1 0 1 1 1 1
2103       // ~ZeroBits: 0 0 0 1 0 0 0 0
2104       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2105         // If OrZero isn't set, we cannot give back a zero result.
2106         // Make sure either the LHS or RHS has a bit set.
2107         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2108           return true;
2109     }
2110   }
2111 
2112   // An exact divide or right shift can only shift off zero bits, so the result
2113   // is a power of two only if the first operand is a power of two and not
2114   // copying a sign bit (sdiv int_min, 2).
2115   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2116       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2117     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2118                                   Depth, Q);
2119   }
2120 
2121   return false;
2122 }
2123 
2124 /// Test whether a GEP's result is known to be non-null.
2125 ///
2126 /// Uses properties inherent in a GEP to try to determine whether it is known
2127 /// to be non-null.
2128 ///
2129 /// Currently this routine does not support vector GEPs.
2130 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2131                               const Query &Q) {
2132   const Function *F = nullptr;
2133   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2134     F = I->getFunction();
2135 
2136   if (!GEP->isInBounds() ||
2137       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2138     return false;
2139 
2140   // FIXME: Support vector-GEPs.
2141   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2142 
2143   // If the base pointer is non-null, we cannot walk to a null address with an
2144   // inbounds GEP in address space zero.
2145   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2146     return true;
2147 
2148   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2149   // If so, then the GEP cannot produce a null pointer, as doing so would
2150   // inherently violate the inbounds contract within address space zero.
2151   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2152        GTI != GTE; ++GTI) {
2153     // Struct types are easy -- they must always be indexed by a constant.
2154     if (StructType *STy = GTI.getStructTypeOrNull()) {
2155       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2156       unsigned ElementIdx = OpC->getZExtValue();
2157       const StructLayout *SL = Q.DL.getStructLayout(STy);
2158       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2159       if (ElementOffset > 0)
2160         return true;
2161       continue;
2162     }
2163 
2164     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2165     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2166       continue;
2167 
2168     // Fast path the constant operand case both for efficiency and so we don't
2169     // increment Depth when just zipping down an all-constant GEP.
2170     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2171       if (!OpC->isZero())
2172         return true;
2173       continue;
2174     }
2175 
2176     // We post-increment Depth here because while isKnownNonZero increments it
2177     // as well, when we pop back up that increment won't persist. We don't want
2178     // to recurse 10k times just because we have 10k GEP operands. We don't
2179     // bail completely out because we want to handle constant GEPs regardless
2180     // of depth.
2181     if (Depth++ >= MaxDepth)
2182       continue;
2183 
2184     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2185       return true;
2186   }
2187 
2188   return false;
2189 }
2190 
2191 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2192                                                   const Instruction *CtxI,
2193                                                   const DominatorTree *DT) {
2194   if (isa<Constant>(V))
2195     return false;
2196 
2197   if (!CtxI || !DT)
2198     return false;
2199 
2200   unsigned NumUsesExplored = 0;
2201   for (auto *U : V->users()) {
2202     // Avoid massive lists
2203     if (NumUsesExplored >= DomConditionsMaxUses)
2204       break;
2205     NumUsesExplored++;
2206 
2207     // If the value is used as an argument to a call or invoke, then argument
2208     // attributes may provide an answer about null-ness.
2209     if (const auto *CB = dyn_cast<CallBase>(U))
2210       if (auto *CalledFunc = CB->getCalledFunction())
2211         for (const Argument &Arg : CalledFunc->args())
2212           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2213               Arg.hasNonNullAttr() && DT->dominates(CB, CtxI))
2214             return true;
2215 
2216     // If the value is used as a load/store, then the pointer must be non null.
2217     if (V == getLoadStorePointerOperand(U)) {
2218       const Instruction *I = cast<Instruction>(U);
2219       if (!NullPointerIsDefined(I->getFunction(),
2220                                 V->getType()->getPointerAddressSpace()) &&
2221           DT->dominates(I, CtxI))
2222         return true;
2223     }
2224 
2225     // Consider only compare instructions uniquely controlling a branch
2226     CmpInst::Predicate Pred;
2227     if (!match(const_cast<User *>(U),
2228                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
2229         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
2230       continue;
2231 
2232     SmallVector<const User *, 4> WorkList;
2233     SmallPtrSet<const User *, 4> Visited;
2234     for (auto *CmpU : U->users()) {
2235       assert(WorkList.empty() && "Should be!");
2236       if (Visited.insert(CmpU).second)
2237         WorkList.push_back(CmpU);
2238 
2239       while (!WorkList.empty()) {
2240         auto *Curr = WorkList.pop_back_val();
2241 
2242         // If a user is an AND, add all its users to the work list. We only
2243         // propagate "pred != null" condition through AND because it is only
2244         // correct to assume that all conditions of AND are met in true branch.
2245         // TODO: Support similar logic of OR and EQ predicate?
2246         if (Pred == ICmpInst::ICMP_NE)
2247           if (auto *BO = dyn_cast<BinaryOperator>(Curr))
2248             if (BO->getOpcode() == Instruction::And) {
2249               for (auto *BOU : BO->users())
2250                 if (Visited.insert(BOU).second)
2251                   WorkList.push_back(BOU);
2252               continue;
2253             }
2254 
2255         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2256           assert(BI->isConditional() && "uses a comparison!");
2257 
2258           BasicBlock *NonNullSuccessor =
2259               BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
2260           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2261           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2262             return true;
2263         } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
2264                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2265           return true;
2266         }
2267       }
2268     }
2269   }
2270 
2271   return false;
2272 }
2273 
2274 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2275 /// ensure that the value it's attached to is never Value?  'RangeType' is
2276 /// is the type of the value described by the range.
2277 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2278   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2279   assert(NumRanges >= 1);
2280   for (unsigned i = 0; i < NumRanges; ++i) {
2281     ConstantInt *Lower =
2282         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2283     ConstantInt *Upper =
2284         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2285     ConstantRange Range(Lower->getValue(), Upper->getValue());
2286     if (Range.contains(Value))
2287       return false;
2288   }
2289   return true;
2290 }
2291 
2292 /// Return true if the given value is known to be non-zero when defined. For
2293 /// vectors, return true if every demanded element is known to be non-zero when
2294 /// defined. For pointers, if the context instruction and dominator tree are
2295 /// specified, perform context-sensitive analysis and return true if the
2296 /// pointer couldn't possibly be null at the specified instruction.
2297 /// Supports values with integer or pointer type and vectors of integers.
2298 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2299                     const Query &Q) {
2300   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2301   // vector
2302   if (isa<ScalableVectorType>(V->getType()))
2303     return false;
2304 
2305   if (auto *C = dyn_cast<Constant>(V)) {
2306     if (C->isNullValue())
2307       return false;
2308     if (isa<ConstantInt>(C))
2309       // Must be non-zero due to null test above.
2310       return true;
2311 
2312     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2313       // See the comment for IntToPtr/PtrToInt instructions below.
2314       if (CE->getOpcode() == Instruction::IntToPtr ||
2315           CE->getOpcode() == Instruction::PtrToInt)
2316         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2317             Q.DL.getTypeSizeInBits(CE->getType()))
2318           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2319     }
2320 
2321     // For constant vectors, check that all elements are undefined or known
2322     // non-zero to determine that the whole vector is known non-zero.
2323     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2324       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2325         if (!DemandedElts[i])
2326           continue;
2327         Constant *Elt = C->getAggregateElement(i);
2328         if (!Elt || Elt->isNullValue())
2329           return false;
2330         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2331           return false;
2332       }
2333       return true;
2334     }
2335 
2336     // A global variable in address space 0 is non null unless extern weak
2337     // or an absolute symbol reference. Other address spaces may have null as a
2338     // valid address for a global, so we can't assume anything.
2339     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2340       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2341           GV->getType()->getAddressSpace() == 0)
2342         return true;
2343     } else
2344       return false;
2345   }
2346 
2347   if (auto *I = dyn_cast<Instruction>(V)) {
2348     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2349       // If the possible ranges don't contain zero, then the value is
2350       // definitely non-zero.
2351       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2352         const APInt ZeroValue(Ty->getBitWidth(), 0);
2353         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2354           return true;
2355       }
2356     }
2357   }
2358 
2359   if (isKnownNonZeroFromAssume(V, Q))
2360     return true;
2361 
2362   // Some of the tests below are recursive, so bail out if we hit the limit.
2363   if (Depth++ >= MaxDepth)
2364     return false;
2365 
2366   // Check for pointer simplifications.
2367 
2368   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2369     // Alloca never returns null, malloc might.
2370     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2371       return true;
2372 
2373     // A byval, inalloca may not be null in a non-default addres space. A
2374     // nonnull argument is assumed never 0.
2375     if (const Argument *A = dyn_cast<Argument>(V)) {
2376       if (((A->hasPassPointeeByValueCopyAttr() &&
2377             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2378            A->hasNonNullAttr()))
2379         return true;
2380     }
2381 
2382     // A Load tagged with nonnull metadata is never null.
2383     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2384       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2385         return true;
2386 
2387     if (const auto *Call = dyn_cast<CallBase>(V)) {
2388       if (Call->isReturnNonNull())
2389         return true;
2390       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2391         return isKnownNonZero(RP, Depth, Q);
2392     }
2393   }
2394 
2395   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2396     return true;
2397 
2398   // Check for recursive pointer simplifications.
2399   if (V->getType()->isPointerTy()) {
2400     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2401     // do not alter the value, or at least not the nullness property of the
2402     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2403     //
2404     // Note that we have to take special care to avoid looking through
2405     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2406     // as casts that can alter the value, e.g., AddrSpaceCasts.
2407     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2408       if (isGEPKnownNonNull(GEP, Depth, Q))
2409         return true;
2410 
2411     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2412       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2413 
2414     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2415       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2416           Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2417         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2418   }
2419 
2420   // Similar to int2ptr above, we can look through ptr2int here if the cast
2421   // is a no-op or an extend and not a truncate.
2422   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2423     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2424         Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2425       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2426 
2427   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2428 
2429   // X | Y != 0 if X != 0 or Y != 0.
2430   Value *X = nullptr, *Y = nullptr;
2431   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2432     return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2433            isKnownNonZero(Y, DemandedElts, Depth, Q);
2434 
2435   // ext X != 0 if X != 0.
2436   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2437     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2438 
2439   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2440   // if the lowest bit is shifted off the end.
2441   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2442     // shl nuw can't remove any non-zero bits.
2443     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2444     if (Q.IIQ.hasNoUnsignedWrap(BO))
2445       return isKnownNonZero(X, Depth, Q);
2446 
2447     KnownBits Known(BitWidth);
2448     computeKnownBits(X, DemandedElts, Known, Depth, Q);
2449     if (Known.One[0])
2450       return true;
2451   }
2452   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2453   // defined if the sign bit is shifted off the end.
2454   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2455     // shr exact can only shift out zero bits.
2456     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2457     if (BO->isExact())
2458       return isKnownNonZero(X, Depth, Q);
2459 
2460     KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2461     if (Known.isNegative())
2462       return true;
2463 
2464     // If the shifter operand is a constant, and all of the bits shifted
2465     // out are known to be zero, and X is known non-zero then at least one
2466     // non-zero bit must remain.
2467     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2468       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2469       // Is there a known one in the portion not shifted out?
2470       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2471         return true;
2472       // Are all the bits to be shifted out known zero?
2473       if (Known.countMinTrailingZeros() >= ShiftVal)
2474         return isKnownNonZero(X, DemandedElts, Depth, Q);
2475     }
2476   }
2477   // div exact can only produce a zero if the dividend is zero.
2478   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2479     return isKnownNonZero(X, DemandedElts, Depth, Q);
2480   }
2481   // X + Y.
2482   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2483     KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2484     KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2485 
2486     // If X and Y are both non-negative (as signed values) then their sum is not
2487     // zero unless both X and Y are zero.
2488     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2489       if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2490           isKnownNonZero(Y, DemandedElts, Depth, Q))
2491         return true;
2492 
2493     // If X and Y are both negative (as signed values) then their sum is not
2494     // zero unless both X and Y equal INT_MIN.
2495     if (XKnown.isNegative() && YKnown.isNegative()) {
2496       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2497       // The sign bit of X is set.  If some other bit is set then X is not equal
2498       // to INT_MIN.
2499       if (XKnown.One.intersects(Mask))
2500         return true;
2501       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2502       // to INT_MIN.
2503       if (YKnown.One.intersects(Mask))
2504         return true;
2505     }
2506 
2507     // The sum of a non-negative number and a power of two is not zero.
2508     if (XKnown.isNonNegative() &&
2509         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2510       return true;
2511     if (YKnown.isNonNegative() &&
2512         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2513       return true;
2514   }
2515   // X * Y.
2516   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2517     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2518     // If X and Y are non-zero then so is X * Y as long as the multiplication
2519     // does not overflow.
2520     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2521         isKnownNonZero(X, DemandedElts, Depth, Q) &&
2522         isKnownNonZero(Y, DemandedElts, Depth, Q))
2523       return true;
2524   }
2525   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2526   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2527     if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2528         isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2529       return true;
2530   }
2531   // PHI
2532   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2533     // Try and detect a recurrence that monotonically increases from a
2534     // starting value, as these are common as induction variables.
2535     if (PN->getNumIncomingValues() == 2) {
2536       Value *Start = PN->getIncomingValue(0);
2537       Value *Induction = PN->getIncomingValue(1);
2538       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2539         std::swap(Start, Induction);
2540       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2541         if (!C->isZero() && !C->isNegative()) {
2542           ConstantInt *X;
2543           if (Q.IIQ.UseInstrInfo &&
2544               (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2545                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2546               !X->isNegative())
2547             return true;
2548         }
2549       }
2550     }
2551     // Check if all incoming values are non-zero constant.
2552     bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2553       return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2554     });
2555     if (AllNonZeroConstants)
2556       return true;
2557   }
2558   // ExtractElement
2559   else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2560     const Value *Vec = EEI->getVectorOperand();
2561     const Value *Idx = EEI->getIndexOperand();
2562     auto *CIdx = dyn_cast<ConstantInt>(Idx);
2563     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
2564     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2565     if (CIdx && CIdx->getValue().ult(NumElts))
2566       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2567     return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2568   }
2569 
2570   KnownBits Known(BitWidth);
2571   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2572   return Known.One != 0;
2573 }
2574 
2575 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2576   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2577   // vector
2578   if (isa<ScalableVectorType>(V->getType()))
2579     return false;
2580 
2581   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2582   APInt DemandedElts =
2583       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2584   return isKnownNonZero(V, DemandedElts, Depth, Q);
2585 }
2586 
2587 /// Return true if V2 == V1 + X, where X is known non-zero.
2588 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2589   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2590   if (!BO || BO->getOpcode() != Instruction::Add)
2591     return false;
2592   Value *Op = nullptr;
2593   if (V2 == BO->getOperand(0))
2594     Op = BO->getOperand(1);
2595   else if (V2 == BO->getOperand(1))
2596     Op = BO->getOperand(0);
2597   else
2598     return false;
2599   return isKnownNonZero(Op, 0, Q);
2600 }
2601 
2602 /// Return true if it is known that V1 != V2.
2603 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2604   if (V1 == V2)
2605     return false;
2606   if (V1->getType() != V2->getType())
2607     // We can't look through casts yet.
2608     return false;
2609   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2610     return true;
2611 
2612   if (V1->getType()->isIntOrIntVectorTy()) {
2613     // Are any known bits in V1 contradictory to known bits in V2? If V1
2614     // has a known zero where V2 has a known one, they must not be equal.
2615     KnownBits Known1 = computeKnownBits(V1, 0, Q);
2616     KnownBits Known2 = computeKnownBits(V2, 0, Q);
2617 
2618     if (Known1.Zero.intersects(Known2.One) ||
2619         Known2.Zero.intersects(Known1.One))
2620       return true;
2621   }
2622   return false;
2623 }
2624 
2625 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2626 /// simplify operations downstream. Mask is known to be zero for bits that V
2627 /// cannot have.
2628 ///
2629 /// This function is defined on values with integer type, values with pointer
2630 /// type, and vectors of integers.  In the case
2631 /// where V is a vector, the mask, known zero, and known one values are the
2632 /// same width as the vector element, and the bit is set only if it is true
2633 /// for all of the elements in the vector.
2634 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2635                        const Query &Q) {
2636   KnownBits Known(Mask.getBitWidth());
2637   computeKnownBits(V, Known, Depth, Q);
2638   return Mask.isSubsetOf(Known.Zero);
2639 }
2640 
2641 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2642 // Returns the input and lower/upper bounds.
2643 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2644                                 const APInt *&CLow, const APInt *&CHigh) {
2645   assert(isa<Operator>(Select) &&
2646          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2647          "Input should be a Select!");
2648 
2649   const Value *LHS = nullptr, *RHS = nullptr;
2650   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2651   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2652     return false;
2653 
2654   if (!match(RHS, m_APInt(CLow)))
2655     return false;
2656 
2657   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2658   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2659   if (getInverseMinMaxFlavor(SPF) != SPF2)
2660     return false;
2661 
2662   if (!match(RHS2, m_APInt(CHigh)))
2663     return false;
2664 
2665   if (SPF == SPF_SMIN)
2666     std::swap(CLow, CHigh);
2667 
2668   In = LHS2;
2669   return CLow->sle(*CHigh);
2670 }
2671 
2672 /// For vector constants, loop over the elements and find the constant with the
2673 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2674 /// or if any element was not analyzed; otherwise, return the count for the
2675 /// element with the minimum number of sign bits.
2676 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2677                                                  const APInt &DemandedElts,
2678                                                  unsigned TyBits) {
2679   const auto *CV = dyn_cast<Constant>(V);
2680   if (!CV || !isa<FixedVectorType>(CV->getType()))
2681     return 0;
2682 
2683   unsigned MinSignBits = TyBits;
2684   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2685   for (unsigned i = 0; i != NumElts; ++i) {
2686     if (!DemandedElts[i])
2687       continue;
2688     // If we find a non-ConstantInt, bail out.
2689     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2690     if (!Elt)
2691       return 0;
2692 
2693     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2694   }
2695 
2696   return MinSignBits;
2697 }
2698 
2699 static unsigned ComputeNumSignBitsImpl(const Value *V,
2700                                        const APInt &DemandedElts,
2701                                        unsigned Depth, const Query &Q);
2702 
2703 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2704                                    unsigned Depth, const Query &Q) {
2705   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2706   assert(Result > 0 && "At least one sign bit needs to be present!");
2707   return Result;
2708 }
2709 
2710 /// Return the number of times the sign bit of the register is replicated into
2711 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2712 /// (itself), but other cases can give us information. For example, immediately
2713 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2714 /// other, so we return 3. For vectors, return the number of sign bits for the
2715 /// vector element with the minimum number of known sign bits of the demanded
2716 /// elements in the vector specified by DemandedElts.
2717 static unsigned ComputeNumSignBitsImpl(const Value *V,
2718                                        const APInt &DemandedElts,
2719                                        unsigned Depth, const Query &Q) {
2720   Type *Ty = V->getType();
2721 
2722   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2723   // vector
2724   if (isa<ScalableVectorType>(Ty))
2725     return 1;
2726 
2727 #ifndef NDEBUG
2728   assert(Depth <= MaxDepth && "Limit Search Depth");
2729 
2730   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2731     assert(
2732         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2733         "DemandedElt width should equal the fixed vector number of elements");
2734   } else {
2735     assert(DemandedElts == APInt(1, 1) &&
2736            "DemandedElt width should be 1 for scalars");
2737   }
2738 #endif
2739 
2740   // We return the minimum number of sign bits that are guaranteed to be present
2741   // in V, so for undef we have to conservatively return 1.  We don't have the
2742   // same behavior for poison though -- that's a FIXME today.
2743 
2744   Type *ScalarTy = Ty->getScalarType();
2745   unsigned TyBits = ScalarTy->isPointerTy() ?
2746     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2747     Q.DL.getTypeSizeInBits(ScalarTy);
2748 
2749   unsigned Tmp, Tmp2;
2750   unsigned FirstAnswer = 1;
2751 
2752   // Note that ConstantInt is handled by the general computeKnownBits case
2753   // below.
2754 
2755   if (Depth == MaxDepth)
2756     return 1;  // Limit search depth.
2757 
2758   if (auto *U = dyn_cast<Operator>(V)) {
2759     switch (Operator::getOpcode(V)) {
2760     default: break;
2761     case Instruction::SExt:
2762       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2763       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2764 
2765     case Instruction::SDiv: {
2766       const APInt *Denominator;
2767       // sdiv X, C -> adds log(C) sign bits.
2768       if (match(U->getOperand(1), m_APInt(Denominator))) {
2769 
2770         // Ignore non-positive denominator.
2771         if (!Denominator->isStrictlyPositive())
2772           break;
2773 
2774         // Calculate the incoming numerator bits.
2775         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2776 
2777         // Add floor(log(C)) bits to the numerator bits.
2778         return std::min(TyBits, NumBits + Denominator->logBase2());
2779       }
2780       break;
2781     }
2782 
2783     case Instruction::SRem: {
2784       const APInt *Denominator;
2785       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2786       // positive constant.  This let us put a lower bound on the number of sign
2787       // bits.
2788       if (match(U->getOperand(1), m_APInt(Denominator))) {
2789 
2790         // Ignore non-positive denominator.
2791         if (!Denominator->isStrictlyPositive())
2792           break;
2793 
2794         // Calculate the incoming numerator bits. SRem by a positive constant
2795         // can't lower the number of sign bits.
2796         unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2797 
2798         // Calculate the leading sign bit constraints by examining the
2799         // denominator.  Given that the denominator is positive, there are two
2800         // cases:
2801         //
2802         //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
2803         //     (1 << ceilLogBase2(C)).
2804         //
2805         //  2. the numerator is negative. Then the result range is (-C,0] and
2806         //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2807         //
2808         // Thus a lower bound on the number of sign bits is `TyBits -
2809         // ceilLogBase2(C)`.
2810 
2811         unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2812         return std::max(NumrBits, ResBits);
2813       }
2814       break;
2815     }
2816 
2817     case Instruction::AShr: {
2818       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2819       // ashr X, C   -> adds C sign bits.  Vectors too.
2820       const APInt *ShAmt;
2821       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2822         if (ShAmt->uge(TyBits))
2823           break; // Bad shift.
2824         unsigned ShAmtLimited = ShAmt->getZExtValue();
2825         Tmp += ShAmtLimited;
2826         if (Tmp > TyBits) Tmp = TyBits;
2827       }
2828       return Tmp;
2829     }
2830     case Instruction::Shl: {
2831       const APInt *ShAmt;
2832       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2833         // shl destroys sign bits.
2834         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2835         if (ShAmt->uge(TyBits) ||   // Bad shift.
2836             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2837         Tmp2 = ShAmt->getZExtValue();
2838         return Tmp - Tmp2;
2839       }
2840       break;
2841     }
2842     case Instruction::And:
2843     case Instruction::Or:
2844     case Instruction::Xor: // NOT is handled here.
2845       // Logical binary ops preserve the number of sign bits at the worst.
2846       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2847       if (Tmp != 1) {
2848         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2849         FirstAnswer = std::min(Tmp, Tmp2);
2850         // We computed what we know about the sign bits as our first
2851         // answer. Now proceed to the generic code that uses
2852         // computeKnownBits, and pick whichever answer is better.
2853       }
2854       break;
2855 
2856     case Instruction::Select: {
2857       // If we have a clamp pattern, we know that the number of sign bits will
2858       // be the minimum of the clamp min/max range.
2859       const Value *X;
2860       const APInt *CLow, *CHigh;
2861       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2862         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2863 
2864       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2865       if (Tmp == 1) break;
2866       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2867       return std::min(Tmp, Tmp2);
2868     }
2869 
2870     case Instruction::Add:
2871       // Add can have at most one carry bit.  Thus we know that the output
2872       // is, at worst, one more bit than the inputs.
2873       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2874       if (Tmp == 1) break;
2875 
2876       // Special case decrementing a value (ADD X, -1):
2877       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2878         if (CRHS->isAllOnesValue()) {
2879           KnownBits Known(TyBits);
2880           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2881 
2882           // If the input is known to be 0 or 1, the output is 0/-1, which is
2883           // all sign bits set.
2884           if ((Known.Zero | 1).isAllOnesValue())
2885             return TyBits;
2886 
2887           // If we are subtracting one from a positive number, there is no carry
2888           // out of the result.
2889           if (Known.isNonNegative())
2890             return Tmp;
2891         }
2892 
2893       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2894       if (Tmp2 == 1) break;
2895       return std::min(Tmp, Tmp2) - 1;
2896 
2897     case Instruction::Sub:
2898       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2899       if (Tmp2 == 1) break;
2900 
2901       // Handle NEG.
2902       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2903         if (CLHS->isNullValue()) {
2904           KnownBits Known(TyBits);
2905           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2906           // If the input is known to be 0 or 1, the output is 0/-1, which is
2907           // all sign bits set.
2908           if ((Known.Zero | 1).isAllOnesValue())
2909             return TyBits;
2910 
2911           // If the input is known to be positive (the sign bit is known clear),
2912           // the output of the NEG has the same number of sign bits as the
2913           // input.
2914           if (Known.isNonNegative())
2915             return Tmp2;
2916 
2917           // Otherwise, we treat this like a SUB.
2918         }
2919 
2920       // Sub can have at most one carry bit.  Thus we know that the output
2921       // is, at worst, one more bit than the inputs.
2922       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2923       if (Tmp == 1) break;
2924       return std::min(Tmp, Tmp2) - 1;
2925 
2926     case Instruction::Mul: {
2927       // The output of the Mul can be at most twice the valid bits in the
2928       // inputs.
2929       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2930       if (SignBitsOp0 == 1) break;
2931       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2932       if (SignBitsOp1 == 1) break;
2933       unsigned OutValidBits =
2934           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2935       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2936     }
2937 
2938     case Instruction::PHI: {
2939       const PHINode *PN = cast<PHINode>(U);
2940       unsigned NumIncomingValues = PN->getNumIncomingValues();
2941       // Don't analyze large in-degree PHIs.
2942       if (NumIncomingValues > 4) break;
2943       // Unreachable blocks may have zero-operand PHI nodes.
2944       if (NumIncomingValues == 0) break;
2945 
2946       // Take the minimum of all incoming values.  This can't infinitely loop
2947       // because of our depth threshold.
2948       Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2949       for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2950         if (Tmp == 1) return Tmp;
2951         Tmp = std::min(
2952             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2953       }
2954       return Tmp;
2955     }
2956 
2957     case Instruction::Trunc:
2958       // FIXME: it's tricky to do anything useful for this, but it is an
2959       // important case for targets like X86.
2960       break;
2961 
2962     case Instruction::ExtractElement:
2963       // Look through extract element. At the moment we keep this simple and
2964       // skip tracking the specific element. But at least we might find
2965       // information valid for all elements of the vector (for example if vector
2966       // is sign extended, shifted, etc).
2967       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2968 
2969     case Instruction::ShuffleVector: {
2970       // Collect the minimum number of sign bits that are shared by every vector
2971       // element referenced by the shuffle.
2972       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
2973       if (!Shuf) {
2974         // FIXME: Add support for shufflevector constant expressions.
2975         return 1;
2976       }
2977       APInt DemandedLHS, DemandedRHS;
2978       // For undef elements, we don't know anything about the common state of
2979       // the shuffle result.
2980       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
2981         return 1;
2982       Tmp = std::numeric_limits<unsigned>::max();
2983       if (!!DemandedLHS) {
2984         const Value *LHS = Shuf->getOperand(0);
2985         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
2986       }
2987       // If we don't know anything, early out and try computeKnownBits
2988       // fall-back.
2989       if (Tmp == 1)
2990         break;
2991       if (!!DemandedRHS) {
2992         const Value *RHS = Shuf->getOperand(1);
2993         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
2994         Tmp = std::min(Tmp, Tmp2);
2995       }
2996       // If we don't know anything, early out and try computeKnownBits
2997       // fall-back.
2998       if (Tmp == 1)
2999         break;
3000       assert(Tmp <= Ty->getScalarSizeInBits() &&
3001              "Failed to determine minimum sign bits");
3002       return Tmp;
3003     }
3004     case Instruction::Call: {
3005       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3006         switch (II->getIntrinsicID()) {
3007         default: break;
3008         case Intrinsic::abs:
3009           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3010           if (Tmp == 1) break;
3011 
3012           // Absolute value reduces number of sign bits by at most 1.
3013           return Tmp - 1;
3014         }
3015       }
3016     }
3017     }
3018   }
3019 
3020   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3021   // use this information.
3022 
3023   // If we can examine all elements of a vector constant successfully, we're
3024   // done (we can't do any better than that). If not, keep trying.
3025   if (unsigned VecSignBits =
3026           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3027     return VecSignBits;
3028 
3029   KnownBits Known(TyBits);
3030   computeKnownBits(V, DemandedElts, Known, Depth, Q);
3031 
3032   // If we know that the sign bit is either zero or one, determine the number of
3033   // identical bits in the top of the input value.
3034   return std::max(FirstAnswer, Known.countMinSignBits());
3035 }
3036 
3037 /// This function computes the integer multiple of Base that equals V.
3038 /// If successful, it returns true and returns the multiple in
3039 /// Multiple. If unsuccessful, it returns false. It looks
3040 /// through SExt instructions only if LookThroughSExt is true.
3041 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3042                            bool LookThroughSExt, unsigned Depth) {
3043   assert(V && "No Value?");
3044   assert(Depth <= MaxDepth && "Limit Search Depth");
3045   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
3046 
3047   Type *T = V->getType();
3048 
3049   ConstantInt *CI = dyn_cast<ConstantInt>(V);
3050 
3051   if (Base == 0)
3052     return false;
3053 
3054   if (Base == 1) {
3055     Multiple = V;
3056     return true;
3057   }
3058 
3059   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3060   Constant *BaseVal = ConstantInt::get(T, Base);
3061   if (CO && CO == BaseVal) {
3062     // Multiple is 1.
3063     Multiple = ConstantInt::get(T, 1);
3064     return true;
3065   }
3066 
3067   if (CI && CI->getZExtValue() % Base == 0) {
3068     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3069     return true;
3070   }
3071 
3072   if (Depth == MaxDepth) return false;  // Limit search depth.
3073 
3074   Operator *I = dyn_cast<Operator>(V);
3075   if (!I) return false;
3076 
3077   switch (I->getOpcode()) {
3078   default: break;
3079   case Instruction::SExt:
3080     if (!LookThroughSExt) return false;
3081     // otherwise fall through to ZExt
3082     LLVM_FALLTHROUGH;
3083   case Instruction::ZExt:
3084     return ComputeMultiple(I->getOperand(0), Base, Multiple,
3085                            LookThroughSExt, Depth+1);
3086   case Instruction::Shl:
3087   case Instruction::Mul: {
3088     Value *Op0 = I->getOperand(0);
3089     Value *Op1 = I->getOperand(1);
3090 
3091     if (I->getOpcode() == Instruction::Shl) {
3092       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3093       if (!Op1CI) return false;
3094       // Turn Op0 << Op1 into Op0 * 2^Op1
3095       APInt Op1Int = Op1CI->getValue();
3096       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3097       APInt API(Op1Int.getBitWidth(), 0);
3098       API.setBit(BitToSet);
3099       Op1 = ConstantInt::get(V->getContext(), API);
3100     }
3101 
3102     Value *Mul0 = nullptr;
3103     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3104       if (Constant *Op1C = dyn_cast<Constant>(Op1))
3105         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3106           if (Op1C->getType()->getPrimitiveSizeInBits() <
3107               MulC->getType()->getPrimitiveSizeInBits())
3108             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3109           if (Op1C->getType()->getPrimitiveSizeInBits() >
3110               MulC->getType()->getPrimitiveSizeInBits())
3111             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3112 
3113           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3114           Multiple = ConstantExpr::getMul(MulC, Op1C);
3115           return true;
3116         }
3117 
3118       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3119         if (Mul0CI->getValue() == 1) {
3120           // V == Base * Op1, so return Op1
3121           Multiple = Op1;
3122           return true;
3123         }
3124     }
3125 
3126     Value *Mul1 = nullptr;
3127     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3128       if (Constant *Op0C = dyn_cast<Constant>(Op0))
3129         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3130           if (Op0C->getType()->getPrimitiveSizeInBits() <
3131               MulC->getType()->getPrimitiveSizeInBits())
3132             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3133           if (Op0C->getType()->getPrimitiveSizeInBits() >
3134               MulC->getType()->getPrimitiveSizeInBits())
3135             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3136 
3137           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3138           Multiple = ConstantExpr::getMul(MulC, Op0C);
3139           return true;
3140         }
3141 
3142       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3143         if (Mul1CI->getValue() == 1) {
3144           // V == Base * Op0, so return Op0
3145           Multiple = Op0;
3146           return true;
3147         }
3148     }
3149   }
3150   }
3151 
3152   // We could not determine if V is a multiple of Base.
3153   return false;
3154 }
3155 
3156 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3157                                             const TargetLibraryInfo *TLI) {
3158   const Function *F = CB.getCalledFunction();
3159   if (!F)
3160     return Intrinsic::not_intrinsic;
3161 
3162   if (F->isIntrinsic())
3163     return F->getIntrinsicID();
3164 
3165   // We are going to infer semantics of a library function based on mapping it
3166   // to an LLVM intrinsic. Check that the library function is available from
3167   // this callbase and in this environment.
3168   LibFunc Func;
3169   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3170       !CB.onlyReadsMemory())
3171     return Intrinsic::not_intrinsic;
3172 
3173   switch (Func) {
3174   default:
3175     break;
3176   case LibFunc_sin:
3177   case LibFunc_sinf:
3178   case LibFunc_sinl:
3179     return Intrinsic::sin;
3180   case LibFunc_cos:
3181   case LibFunc_cosf:
3182   case LibFunc_cosl:
3183     return Intrinsic::cos;
3184   case LibFunc_exp:
3185   case LibFunc_expf:
3186   case LibFunc_expl:
3187     return Intrinsic::exp;
3188   case LibFunc_exp2:
3189   case LibFunc_exp2f:
3190   case LibFunc_exp2l:
3191     return Intrinsic::exp2;
3192   case LibFunc_log:
3193   case LibFunc_logf:
3194   case LibFunc_logl:
3195     return Intrinsic::log;
3196   case LibFunc_log10:
3197   case LibFunc_log10f:
3198   case LibFunc_log10l:
3199     return Intrinsic::log10;
3200   case LibFunc_log2:
3201   case LibFunc_log2f:
3202   case LibFunc_log2l:
3203     return Intrinsic::log2;
3204   case LibFunc_fabs:
3205   case LibFunc_fabsf:
3206   case LibFunc_fabsl:
3207     return Intrinsic::fabs;
3208   case LibFunc_fmin:
3209   case LibFunc_fminf:
3210   case LibFunc_fminl:
3211     return Intrinsic::minnum;
3212   case LibFunc_fmax:
3213   case LibFunc_fmaxf:
3214   case LibFunc_fmaxl:
3215     return Intrinsic::maxnum;
3216   case LibFunc_copysign:
3217   case LibFunc_copysignf:
3218   case LibFunc_copysignl:
3219     return Intrinsic::copysign;
3220   case LibFunc_floor:
3221   case LibFunc_floorf:
3222   case LibFunc_floorl:
3223     return Intrinsic::floor;
3224   case LibFunc_ceil:
3225   case LibFunc_ceilf:
3226   case LibFunc_ceill:
3227     return Intrinsic::ceil;
3228   case LibFunc_trunc:
3229   case LibFunc_truncf:
3230   case LibFunc_truncl:
3231     return Intrinsic::trunc;
3232   case LibFunc_rint:
3233   case LibFunc_rintf:
3234   case LibFunc_rintl:
3235     return Intrinsic::rint;
3236   case LibFunc_nearbyint:
3237   case LibFunc_nearbyintf:
3238   case LibFunc_nearbyintl:
3239     return Intrinsic::nearbyint;
3240   case LibFunc_round:
3241   case LibFunc_roundf:
3242   case LibFunc_roundl:
3243     return Intrinsic::round;
3244   case LibFunc_roundeven:
3245   case LibFunc_roundevenf:
3246   case LibFunc_roundevenl:
3247     return Intrinsic::roundeven;
3248   case LibFunc_pow:
3249   case LibFunc_powf:
3250   case LibFunc_powl:
3251     return Intrinsic::pow;
3252   case LibFunc_sqrt:
3253   case LibFunc_sqrtf:
3254   case LibFunc_sqrtl:
3255     return Intrinsic::sqrt;
3256   }
3257 
3258   return Intrinsic::not_intrinsic;
3259 }
3260 
3261 /// Return true if we can prove that the specified FP value is never equal to
3262 /// -0.0.
3263 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3264 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3265 ///       the same as +0.0 in floating-point ops.
3266 ///
3267 /// NOTE: this function will need to be revisited when we support non-default
3268 /// rounding modes!
3269 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3270                                 unsigned Depth) {
3271   if (auto *CFP = dyn_cast<ConstantFP>(V))
3272     return !CFP->getValueAPF().isNegZero();
3273 
3274   // Limit search depth.
3275   if (Depth == MaxDepth)
3276     return false;
3277 
3278   auto *Op = dyn_cast<Operator>(V);
3279   if (!Op)
3280     return false;
3281 
3282   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3283   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3284     return true;
3285 
3286   // sitofp and uitofp turn into +0.0 for zero.
3287   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3288     return true;
3289 
3290   if (auto *Call = dyn_cast<CallInst>(Op)) {
3291     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3292     switch (IID) {
3293     default:
3294       break;
3295     // sqrt(-0.0) = -0.0, no other negative results are possible.
3296     case Intrinsic::sqrt:
3297     case Intrinsic::canonicalize:
3298       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3299     // fabs(x) != -0.0
3300     case Intrinsic::fabs:
3301       return true;
3302     }
3303   }
3304 
3305   return false;
3306 }
3307 
3308 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3309 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3310 /// bit despite comparing equal.
3311 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3312                                             const TargetLibraryInfo *TLI,
3313                                             bool SignBitOnly,
3314                                             unsigned Depth) {
3315   // TODO: This function does not do the right thing when SignBitOnly is true
3316   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3317   // which flips the sign bits of NaNs.  See
3318   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3319 
3320   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3321     return !CFP->getValueAPF().isNegative() ||
3322            (!SignBitOnly && CFP->getValueAPF().isZero());
3323   }
3324 
3325   // Handle vector of constants.
3326   if (auto *CV = dyn_cast<Constant>(V)) {
3327     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3328       unsigned NumElts = CVFVTy->getNumElements();
3329       for (unsigned i = 0; i != NumElts; ++i) {
3330         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3331         if (!CFP)
3332           return false;
3333         if (CFP->getValueAPF().isNegative() &&
3334             (SignBitOnly || !CFP->getValueAPF().isZero()))
3335           return false;
3336       }
3337 
3338       // All non-negative ConstantFPs.
3339       return true;
3340     }
3341   }
3342 
3343   if (Depth == MaxDepth)
3344     return false; // Limit search depth.
3345 
3346   const Operator *I = dyn_cast<Operator>(V);
3347   if (!I)
3348     return false;
3349 
3350   switch (I->getOpcode()) {
3351   default:
3352     break;
3353   // Unsigned integers are always nonnegative.
3354   case Instruction::UIToFP:
3355     return true;
3356   case Instruction::FMul:
3357   case Instruction::FDiv:
3358     // X * X is always non-negative or a NaN.
3359     // X / X is always exactly 1.0 or a NaN.
3360     if (I->getOperand(0) == I->getOperand(1) &&
3361         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3362       return true;
3363 
3364     LLVM_FALLTHROUGH;
3365   case Instruction::FAdd:
3366   case Instruction::FRem:
3367     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3368                                            Depth + 1) &&
3369            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3370                                            Depth + 1);
3371   case Instruction::Select:
3372     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3373                                            Depth + 1) &&
3374            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3375                                            Depth + 1);
3376   case Instruction::FPExt:
3377   case Instruction::FPTrunc:
3378     // Widening/narrowing never change sign.
3379     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3380                                            Depth + 1);
3381   case Instruction::ExtractElement:
3382     // Look through extract element. At the moment we keep this simple and skip
3383     // tracking the specific element. But at least we might find information
3384     // valid for all elements of the vector.
3385     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3386                                            Depth + 1);
3387   case Instruction::Call:
3388     const auto *CI = cast<CallInst>(I);
3389     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3390     switch (IID) {
3391     default:
3392       break;
3393     case Intrinsic::maxnum: {
3394       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3395       auto isPositiveNum = [&](Value *V) {
3396         if (SignBitOnly) {
3397           // With SignBitOnly, this is tricky because the result of
3398           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3399           // a constant strictly greater than 0.0.
3400           const APFloat *C;
3401           return match(V, m_APFloat(C)) &&
3402                  *C > APFloat::getZero(C->getSemantics());
3403         }
3404 
3405         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3406         // maxnum can't be ordered-less-than-zero.
3407         return isKnownNeverNaN(V, TLI) &&
3408                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3409       };
3410 
3411       // TODO: This could be improved. We could also check that neither operand
3412       //       has its sign bit set (and at least 1 is not-NAN?).
3413       return isPositiveNum(V0) || isPositiveNum(V1);
3414     }
3415 
3416     case Intrinsic::maximum:
3417       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3418                                              Depth + 1) ||
3419              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3420                                              Depth + 1);
3421     case Intrinsic::minnum:
3422     case Intrinsic::minimum:
3423       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3424                                              Depth + 1) &&
3425              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3426                                              Depth + 1);
3427     case Intrinsic::exp:
3428     case Intrinsic::exp2:
3429     case Intrinsic::fabs:
3430       return true;
3431 
3432     case Intrinsic::sqrt:
3433       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3434       if (!SignBitOnly)
3435         return true;
3436       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3437                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3438 
3439     case Intrinsic::powi:
3440       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3441         // powi(x,n) is non-negative if n is even.
3442         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3443           return true;
3444       }
3445       // TODO: This is not correct.  Given that exp is an integer, here are the
3446       // ways that pow can return a negative value:
3447       //
3448       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3449       //   pow(-0, exp)   --> -inf if exp is negative odd.
3450       //   pow(-0, exp)   --> -0 if exp is positive odd.
3451       //   pow(-inf, exp) --> -0 if exp is negative odd.
3452       //   pow(-inf, exp) --> -inf if exp is positive odd.
3453       //
3454       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3455       // but we must return false if x == -0.  Unfortunately we do not currently
3456       // have a way of expressing this constraint.  See details in
3457       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3458       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3459                                              Depth + 1);
3460 
3461     case Intrinsic::fma:
3462     case Intrinsic::fmuladd:
3463       // x*x+y is non-negative if y is non-negative.
3464       return I->getOperand(0) == I->getOperand(1) &&
3465              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3466              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3467                                              Depth + 1);
3468     }
3469     break;
3470   }
3471   return false;
3472 }
3473 
3474 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3475                                        const TargetLibraryInfo *TLI) {
3476   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3477 }
3478 
3479 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3480   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3481 }
3482 
3483 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3484                                 unsigned Depth) {
3485   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3486 
3487   // If we're told that infinities won't happen, assume they won't.
3488   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3489     if (FPMathOp->hasNoInfs())
3490       return true;
3491 
3492   // Handle scalar constants.
3493   if (auto *CFP = dyn_cast<ConstantFP>(V))
3494     return !CFP->isInfinity();
3495 
3496   if (Depth == MaxDepth)
3497     return false;
3498 
3499   if (auto *Inst = dyn_cast<Instruction>(V)) {
3500     switch (Inst->getOpcode()) {
3501     case Instruction::Select: {
3502       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3503              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3504     }
3505     case Instruction::UIToFP:
3506       // If the input type fits into the floating type the result is finite.
3507       return ilogb(APFloat::getLargest(
3508                  Inst->getType()->getScalarType()->getFltSemantics())) >=
3509              (int)Inst->getOperand(0)->getType()->getScalarSizeInBits();
3510     default:
3511       break;
3512     }
3513   }
3514 
3515   // try to handle fixed width vector constants
3516   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3517   if (VFVTy && isa<Constant>(V)) {
3518     // For vectors, verify that each element is not infinity.
3519     unsigned NumElts = VFVTy->getNumElements();
3520     for (unsigned i = 0; i != NumElts; ++i) {
3521       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3522       if (!Elt)
3523         return false;
3524       if (isa<UndefValue>(Elt))
3525         continue;
3526       auto *CElt = dyn_cast<ConstantFP>(Elt);
3527       if (!CElt || CElt->isInfinity())
3528         return false;
3529     }
3530     // All elements were confirmed non-infinity or undefined.
3531     return true;
3532   }
3533 
3534   // was not able to prove that V never contains infinity
3535   return false;
3536 }
3537 
3538 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3539                            unsigned Depth) {
3540   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3541 
3542   // If we're told that NaNs won't happen, assume they won't.
3543   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3544     if (FPMathOp->hasNoNaNs())
3545       return true;
3546 
3547   // Handle scalar constants.
3548   if (auto *CFP = dyn_cast<ConstantFP>(V))
3549     return !CFP->isNaN();
3550 
3551   if (Depth == MaxDepth)
3552     return false;
3553 
3554   if (auto *Inst = dyn_cast<Instruction>(V)) {
3555     switch (Inst->getOpcode()) {
3556     case Instruction::FAdd:
3557     case Instruction::FSub:
3558       // Adding positive and negative infinity produces NaN.
3559       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3560              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3561              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3562               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3563 
3564     case Instruction::FMul:
3565       // Zero multiplied with infinity produces NaN.
3566       // FIXME: If neither side can be zero fmul never produces NaN.
3567       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3568              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3569              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3570              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3571 
3572     case Instruction::FDiv:
3573     case Instruction::FRem:
3574       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3575       return false;
3576 
3577     case Instruction::Select: {
3578       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3579              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3580     }
3581     case Instruction::SIToFP:
3582     case Instruction::UIToFP:
3583       return true;
3584     case Instruction::FPTrunc:
3585     case Instruction::FPExt:
3586       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3587     default:
3588       break;
3589     }
3590   }
3591 
3592   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3593     switch (II->getIntrinsicID()) {
3594     case Intrinsic::canonicalize:
3595     case Intrinsic::fabs:
3596     case Intrinsic::copysign:
3597     case Intrinsic::exp:
3598     case Intrinsic::exp2:
3599     case Intrinsic::floor:
3600     case Intrinsic::ceil:
3601     case Intrinsic::trunc:
3602     case Intrinsic::rint:
3603     case Intrinsic::nearbyint:
3604     case Intrinsic::round:
3605     case Intrinsic::roundeven:
3606       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3607     case Intrinsic::sqrt:
3608       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3609              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3610     case Intrinsic::minnum:
3611     case Intrinsic::maxnum:
3612       // If either operand is not NaN, the result is not NaN.
3613       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3614              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3615     default:
3616       return false;
3617     }
3618   }
3619 
3620   // Try to handle fixed width vector constants
3621   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3622   if (VFVTy && isa<Constant>(V)) {
3623     // For vectors, verify that each element is not NaN.
3624     unsigned NumElts = VFVTy->getNumElements();
3625     for (unsigned i = 0; i != NumElts; ++i) {
3626       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3627       if (!Elt)
3628         return false;
3629       if (isa<UndefValue>(Elt))
3630         continue;
3631       auto *CElt = dyn_cast<ConstantFP>(Elt);
3632       if (!CElt || CElt->isNaN())
3633         return false;
3634     }
3635     // All elements were confirmed not-NaN or undefined.
3636     return true;
3637   }
3638 
3639   // Was not able to prove that V never contains NaN
3640   return false;
3641 }
3642 
3643 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3644 
3645   // All byte-wide stores are splatable, even of arbitrary variables.
3646   if (V->getType()->isIntegerTy(8))
3647     return V;
3648 
3649   LLVMContext &Ctx = V->getContext();
3650 
3651   // Undef don't care.
3652   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3653   if (isa<UndefValue>(V))
3654     return UndefInt8;
3655 
3656   // Return Undef for zero-sized type.
3657   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3658     return UndefInt8;
3659 
3660   Constant *C = dyn_cast<Constant>(V);
3661   if (!C) {
3662     // Conceptually, we could handle things like:
3663     //   %a = zext i8 %X to i16
3664     //   %b = shl i16 %a, 8
3665     //   %c = or i16 %a, %b
3666     // but until there is an example that actually needs this, it doesn't seem
3667     // worth worrying about.
3668     return nullptr;
3669   }
3670 
3671   // Handle 'null' ConstantArrayZero etc.
3672   if (C->isNullValue())
3673     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3674 
3675   // Constant floating-point values can be handled as integer values if the
3676   // corresponding integer value is "byteable".  An important case is 0.0.
3677   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3678     Type *Ty = nullptr;
3679     if (CFP->getType()->isHalfTy())
3680       Ty = Type::getInt16Ty(Ctx);
3681     else if (CFP->getType()->isFloatTy())
3682       Ty = Type::getInt32Ty(Ctx);
3683     else if (CFP->getType()->isDoubleTy())
3684       Ty = Type::getInt64Ty(Ctx);
3685     // Don't handle long double formats, which have strange constraints.
3686     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3687               : nullptr;
3688   }
3689 
3690   // We can handle constant integers that are multiple of 8 bits.
3691   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3692     if (CI->getBitWidth() % 8 == 0) {
3693       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3694       if (!CI->getValue().isSplat(8))
3695         return nullptr;
3696       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3697     }
3698   }
3699 
3700   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3701     if (CE->getOpcode() == Instruction::IntToPtr) {
3702       auto PS = DL.getPointerSizeInBits(
3703           cast<PointerType>(CE->getType())->getAddressSpace());
3704       return isBytewiseValue(
3705           ConstantExpr::getIntegerCast(CE->getOperand(0),
3706                                        Type::getIntNTy(Ctx, PS), false),
3707           DL);
3708     }
3709   }
3710 
3711   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3712     if (LHS == RHS)
3713       return LHS;
3714     if (!LHS || !RHS)
3715       return nullptr;
3716     if (LHS == UndefInt8)
3717       return RHS;
3718     if (RHS == UndefInt8)
3719       return LHS;
3720     return nullptr;
3721   };
3722 
3723   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3724     Value *Val = UndefInt8;
3725     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3726       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3727         return nullptr;
3728     return Val;
3729   }
3730 
3731   if (isa<ConstantAggregate>(C)) {
3732     Value *Val = UndefInt8;
3733     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3734       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3735         return nullptr;
3736     return Val;
3737   }
3738 
3739   // Don't try to handle the handful of other constants.
3740   return nullptr;
3741 }
3742 
3743 // This is the recursive version of BuildSubAggregate. It takes a few different
3744 // arguments. Idxs is the index within the nested struct From that we are
3745 // looking at now (which is of type IndexedType). IdxSkip is the number of
3746 // indices from Idxs that should be left out when inserting into the resulting
3747 // struct. To is the result struct built so far, new insertvalue instructions
3748 // build on that.
3749 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3750                                 SmallVectorImpl<unsigned> &Idxs,
3751                                 unsigned IdxSkip,
3752                                 Instruction *InsertBefore) {
3753   StructType *STy = dyn_cast<StructType>(IndexedType);
3754   if (STy) {
3755     // Save the original To argument so we can modify it
3756     Value *OrigTo = To;
3757     // General case, the type indexed by Idxs is a struct
3758     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3759       // Process each struct element recursively
3760       Idxs.push_back(i);
3761       Value *PrevTo = To;
3762       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3763                              InsertBefore);
3764       Idxs.pop_back();
3765       if (!To) {
3766         // Couldn't find any inserted value for this index? Cleanup
3767         while (PrevTo != OrigTo) {
3768           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3769           PrevTo = Del->getAggregateOperand();
3770           Del->eraseFromParent();
3771         }
3772         // Stop processing elements
3773         break;
3774       }
3775     }
3776     // If we successfully found a value for each of our subaggregates
3777     if (To)
3778       return To;
3779   }
3780   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3781   // the struct's elements had a value that was inserted directly. In the latter
3782   // case, perhaps we can't determine each of the subelements individually, but
3783   // we might be able to find the complete struct somewhere.
3784 
3785   // Find the value that is at that particular spot
3786   Value *V = FindInsertedValue(From, Idxs);
3787 
3788   if (!V)
3789     return nullptr;
3790 
3791   // Insert the value in the new (sub) aggregate
3792   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3793                                  "tmp", InsertBefore);
3794 }
3795 
3796 // This helper takes a nested struct and extracts a part of it (which is again a
3797 // struct) into a new value. For example, given the struct:
3798 // { a, { b, { c, d }, e } }
3799 // and the indices "1, 1" this returns
3800 // { c, d }.
3801 //
3802 // It does this by inserting an insertvalue for each element in the resulting
3803 // struct, as opposed to just inserting a single struct. This will only work if
3804 // each of the elements of the substruct are known (ie, inserted into From by an
3805 // insertvalue instruction somewhere).
3806 //
3807 // All inserted insertvalue instructions are inserted before InsertBefore
3808 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3809                                 Instruction *InsertBefore) {
3810   assert(InsertBefore && "Must have someplace to insert!");
3811   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3812                                                              idx_range);
3813   Value *To = UndefValue::get(IndexedType);
3814   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3815   unsigned IdxSkip = Idxs.size();
3816 
3817   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3818 }
3819 
3820 /// Given an aggregate and a sequence of indices, see if the scalar value
3821 /// indexed is already around as a register, for example if it was inserted
3822 /// directly into the aggregate.
3823 ///
3824 /// If InsertBefore is not null, this function will duplicate (modified)
3825 /// insertvalues when a part of a nested struct is extracted.
3826 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3827                                Instruction *InsertBefore) {
3828   // Nothing to index? Just return V then (this is useful at the end of our
3829   // recursion).
3830   if (idx_range.empty())
3831     return V;
3832   // We have indices, so V should have an indexable type.
3833   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3834          "Not looking at a struct or array?");
3835   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3836          "Invalid indices for type?");
3837 
3838   if (Constant *C = dyn_cast<Constant>(V)) {
3839     C = C->getAggregateElement(idx_range[0]);
3840     if (!C) return nullptr;
3841     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3842   }
3843 
3844   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3845     // Loop the indices for the insertvalue instruction in parallel with the
3846     // requested indices
3847     const unsigned *req_idx = idx_range.begin();
3848     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3849          i != e; ++i, ++req_idx) {
3850       if (req_idx == idx_range.end()) {
3851         // We can't handle this without inserting insertvalues
3852         if (!InsertBefore)
3853           return nullptr;
3854 
3855         // The requested index identifies a part of a nested aggregate. Handle
3856         // this specially. For example,
3857         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3858         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3859         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3860         // This can be changed into
3861         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3862         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3863         // which allows the unused 0,0 element from the nested struct to be
3864         // removed.
3865         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3866                                  InsertBefore);
3867       }
3868 
3869       // This insert value inserts something else than what we are looking for.
3870       // See if the (aggregate) value inserted into has the value we are
3871       // looking for, then.
3872       if (*req_idx != *i)
3873         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3874                                  InsertBefore);
3875     }
3876     // If we end up here, the indices of the insertvalue match with those
3877     // requested (though possibly only partially). Now we recursively look at
3878     // the inserted value, passing any remaining indices.
3879     return FindInsertedValue(I->getInsertedValueOperand(),
3880                              makeArrayRef(req_idx, idx_range.end()),
3881                              InsertBefore);
3882   }
3883 
3884   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3885     // If we're extracting a value from an aggregate that was extracted from
3886     // something else, we can extract from that something else directly instead.
3887     // However, we will need to chain I's indices with the requested indices.
3888 
3889     // Calculate the number of indices required
3890     unsigned size = I->getNumIndices() + idx_range.size();
3891     // Allocate some space to put the new indices in
3892     SmallVector<unsigned, 5> Idxs;
3893     Idxs.reserve(size);
3894     // Add indices from the extract value instruction
3895     Idxs.append(I->idx_begin(), I->idx_end());
3896 
3897     // Add requested indices
3898     Idxs.append(idx_range.begin(), idx_range.end());
3899 
3900     assert(Idxs.size() == size
3901            && "Number of indices added not correct?");
3902 
3903     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3904   }
3905   // Otherwise, we don't know (such as, extracting from a function return value
3906   // or load instruction)
3907   return nullptr;
3908 }
3909 
3910 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3911                                        unsigned CharSize) {
3912   // Make sure the GEP has exactly three arguments.
3913   if (GEP->getNumOperands() != 3)
3914     return false;
3915 
3916   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3917   // CharSize.
3918   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3919   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3920     return false;
3921 
3922   // Check to make sure that the first operand of the GEP is an integer and
3923   // has value 0 so that we are sure we're indexing into the initializer.
3924   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3925   if (!FirstIdx || !FirstIdx->isZero())
3926     return false;
3927 
3928   return true;
3929 }
3930 
3931 bool llvm::getConstantDataArrayInfo(const Value *V,
3932                                     ConstantDataArraySlice &Slice,
3933                                     unsigned ElementSize, uint64_t Offset) {
3934   assert(V);
3935 
3936   // Look through bitcast instructions and geps.
3937   V = V->stripPointerCasts();
3938 
3939   // If the value is a GEP instruction or constant expression, treat it as an
3940   // offset.
3941   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3942     // The GEP operator should be based on a pointer to string constant, and is
3943     // indexing into the string constant.
3944     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3945       return false;
3946 
3947     // If the second index isn't a ConstantInt, then this is a variable index
3948     // into the array.  If this occurs, we can't say anything meaningful about
3949     // the string.
3950     uint64_t StartIdx = 0;
3951     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3952       StartIdx = CI->getZExtValue();
3953     else
3954       return false;
3955     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3956                                     StartIdx + Offset);
3957   }
3958 
3959   // The GEP instruction, constant or instruction, must reference a global
3960   // variable that is a constant and is initialized. The referenced constant
3961   // initializer is the array that we'll use for optimization.
3962   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3963   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3964     return false;
3965 
3966   const ConstantDataArray *Array;
3967   ArrayType *ArrayTy;
3968   if (GV->getInitializer()->isNullValue()) {
3969     Type *GVTy = GV->getValueType();
3970     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3971       // A zeroinitializer for the array; there is no ConstantDataArray.
3972       Array = nullptr;
3973     } else {
3974       const DataLayout &DL = GV->getParent()->getDataLayout();
3975       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
3976       uint64_t Length = SizeInBytes / (ElementSize / 8);
3977       if (Length <= Offset)
3978         return false;
3979 
3980       Slice.Array = nullptr;
3981       Slice.Offset = 0;
3982       Slice.Length = Length - Offset;
3983       return true;
3984     }
3985   } else {
3986     // This must be a ConstantDataArray.
3987     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3988     if (!Array)
3989       return false;
3990     ArrayTy = Array->getType();
3991   }
3992   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3993     return false;
3994 
3995   uint64_t NumElts = ArrayTy->getArrayNumElements();
3996   if (Offset > NumElts)
3997     return false;
3998 
3999   Slice.Array = Array;
4000   Slice.Offset = Offset;
4001   Slice.Length = NumElts - Offset;
4002   return true;
4003 }
4004 
4005 /// This function computes the length of a null-terminated C string pointed to
4006 /// by V. If successful, it returns true and returns the string in Str.
4007 /// If unsuccessful, it returns false.
4008 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4009                                  uint64_t Offset, bool TrimAtNul) {
4010   ConstantDataArraySlice Slice;
4011   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4012     return false;
4013 
4014   if (Slice.Array == nullptr) {
4015     if (TrimAtNul) {
4016       Str = StringRef();
4017       return true;
4018     }
4019     if (Slice.Length == 1) {
4020       Str = StringRef("", 1);
4021       return true;
4022     }
4023     // We cannot instantiate a StringRef as we do not have an appropriate string
4024     // of 0s at hand.
4025     return false;
4026   }
4027 
4028   // Start out with the entire array in the StringRef.
4029   Str = Slice.Array->getAsString();
4030   // Skip over 'offset' bytes.
4031   Str = Str.substr(Slice.Offset);
4032 
4033   if (TrimAtNul) {
4034     // Trim off the \0 and anything after it.  If the array is not nul
4035     // terminated, we just return the whole end of string.  The client may know
4036     // some other way that the string is length-bound.
4037     Str = Str.substr(0, Str.find('\0'));
4038   }
4039   return true;
4040 }
4041 
4042 // These next two are very similar to the above, but also look through PHI
4043 // nodes.
4044 // TODO: See if we can integrate these two together.
4045 
4046 /// If we can compute the length of the string pointed to by
4047 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4048 static uint64_t GetStringLengthH(const Value *V,
4049                                  SmallPtrSetImpl<const PHINode*> &PHIs,
4050                                  unsigned CharSize) {
4051   // Look through noop bitcast instructions.
4052   V = V->stripPointerCasts();
4053 
4054   // If this is a PHI node, there are two cases: either we have already seen it
4055   // or we haven't.
4056   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4057     if (!PHIs.insert(PN).second)
4058       return ~0ULL;  // already in the set.
4059 
4060     // If it was new, see if all the input strings are the same length.
4061     uint64_t LenSoFar = ~0ULL;
4062     for (Value *IncValue : PN->incoming_values()) {
4063       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4064       if (Len == 0) return 0; // Unknown length -> unknown.
4065 
4066       if (Len == ~0ULL) continue;
4067 
4068       if (Len != LenSoFar && LenSoFar != ~0ULL)
4069         return 0;    // Disagree -> unknown.
4070       LenSoFar = Len;
4071     }
4072 
4073     // Success, all agree.
4074     return LenSoFar;
4075   }
4076 
4077   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4078   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4079     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4080     if (Len1 == 0) return 0;
4081     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4082     if (Len2 == 0) return 0;
4083     if (Len1 == ~0ULL) return Len2;
4084     if (Len2 == ~0ULL) return Len1;
4085     if (Len1 != Len2) return 0;
4086     return Len1;
4087   }
4088 
4089   // Otherwise, see if we can read the string.
4090   ConstantDataArraySlice Slice;
4091   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4092     return 0;
4093 
4094   if (Slice.Array == nullptr)
4095     return 1;
4096 
4097   // Search for nul characters
4098   unsigned NullIndex = 0;
4099   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4100     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4101       break;
4102   }
4103 
4104   return NullIndex + 1;
4105 }
4106 
4107 /// If we can compute the length of the string pointed to by
4108 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4109 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4110   if (!V->getType()->isPointerTy())
4111     return 0;
4112 
4113   SmallPtrSet<const PHINode*, 32> PHIs;
4114   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4115   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4116   // an empty string as a length.
4117   return Len == ~0ULL ? 1 : Len;
4118 }
4119 
4120 const Value *
4121 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4122                                            bool MustPreserveNullness) {
4123   assert(Call &&
4124          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4125   if (const Value *RV = Call->getReturnedArgOperand())
4126     return RV;
4127   // This can be used only as a aliasing property.
4128   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4129           Call, MustPreserveNullness))
4130     return Call->getArgOperand(0);
4131   return nullptr;
4132 }
4133 
4134 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4135     const CallBase *Call, bool MustPreserveNullness) {
4136   switch (Call->getIntrinsicID()) {
4137   case Intrinsic::launder_invariant_group:
4138   case Intrinsic::strip_invariant_group:
4139   case Intrinsic::aarch64_irg:
4140   case Intrinsic::aarch64_tagp:
4141     return true;
4142   case Intrinsic::ptrmask:
4143     return !MustPreserveNullness;
4144   default:
4145     return false;
4146   }
4147 }
4148 
4149 /// \p PN defines a loop-variant pointer to an object.  Check if the
4150 /// previous iteration of the loop was referring to the same object as \p PN.
4151 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4152                                          const LoopInfo *LI) {
4153   // Find the loop-defined value.
4154   Loop *L = LI->getLoopFor(PN->getParent());
4155   if (PN->getNumIncomingValues() != 2)
4156     return true;
4157 
4158   // Find the value from previous iteration.
4159   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4160   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4161     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4162   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4163     return true;
4164 
4165   // If a new pointer is loaded in the loop, the pointer references a different
4166   // object in every iteration.  E.g.:
4167   //    for (i)
4168   //       int *p = a[i];
4169   //       ...
4170   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4171     if (!L->isLoopInvariant(Load->getPointerOperand()))
4172       return false;
4173   return true;
4174 }
4175 
4176 Value *llvm::getUnderlyingObject(Value *V, unsigned MaxLookup) {
4177   if (!V->getType()->isPointerTy())
4178     return V;
4179   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4180     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4181       V = GEP->getPointerOperand();
4182     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4183                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4184       V = cast<Operator>(V)->getOperand(0);
4185       if (!V->getType()->isPointerTy())
4186         return V;
4187     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
4188       if (GA->isInterposable())
4189         return V;
4190       V = GA->getAliasee();
4191     } else {
4192       if (auto *PHI = dyn_cast<PHINode>(V)) {
4193         // Look through single-arg phi nodes created by LCSSA.
4194         if (PHI->getNumIncomingValues() == 1) {
4195           V = PHI->getIncomingValue(0);
4196           continue;
4197         }
4198       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4199         // CaptureTracking can know about special capturing properties of some
4200         // intrinsics like launder.invariant.group, that can't be expressed with
4201         // the attributes, but have properties like returning aliasing pointer.
4202         // Because some analysis may assume that nocaptured pointer is not
4203         // returned from some special intrinsic (because function would have to
4204         // be marked with returns attribute), it is crucial to use this function
4205         // because it should be in sync with CaptureTracking. Not using it may
4206         // cause weird miscompilations where 2 aliasing pointers are assumed to
4207         // noalias.
4208         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4209           V = RP;
4210           continue;
4211         }
4212       }
4213 
4214       return V;
4215     }
4216     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4217   }
4218   return V;
4219 }
4220 
4221 void llvm::getUnderlyingObjects(const Value *V,
4222                                 SmallVectorImpl<const Value *> &Objects,
4223                                 LoopInfo *LI, unsigned MaxLookup) {
4224   SmallPtrSet<const Value *, 4> Visited;
4225   SmallVector<const Value *, 4> Worklist;
4226   Worklist.push_back(V);
4227   do {
4228     const Value *P = Worklist.pop_back_val();
4229     P = getUnderlyingObject(P, MaxLookup);
4230 
4231     if (!Visited.insert(P).second)
4232       continue;
4233 
4234     if (auto *SI = dyn_cast<SelectInst>(P)) {
4235       Worklist.push_back(SI->getTrueValue());
4236       Worklist.push_back(SI->getFalseValue());
4237       continue;
4238     }
4239 
4240     if (auto *PN = dyn_cast<PHINode>(P)) {
4241       // If this PHI changes the underlying object in every iteration of the
4242       // loop, don't look through it.  Consider:
4243       //   int **A;
4244       //   for (i) {
4245       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4246       //     Curr = A[i];
4247       //     *Prev, *Curr;
4248       //
4249       // Prev is tracking Curr one iteration behind so they refer to different
4250       // underlying objects.
4251       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4252           isSameUnderlyingObjectInLoop(PN, LI))
4253         for (Value *IncValue : PN->incoming_values())
4254           Worklist.push_back(IncValue);
4255       continue;
4256     }
4257 
4258     Objects.push_back(P);
4259   } while (!Worklist.empty());
4260 }
4261 
4262 /// This is the function that does the work of looking through basic
4263 /// ptrtoint+arithmetic+inttoptr sequences.
4264 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4265   do {
4266     if (const Operator *U = dyn_cast<Operator>(V)) {
4267       // If we find a ptrtoint, we can transfer control back to the
4268       // regular getUnderlyingObjectFromInt.
4269       if (U->getOpcode() == Instruction::PtrToInt)
4270         return U->getOperand(0);
4271       // If we find an add of a constant, a multiplied value, or a phi, it's
4272       // likely that the other operand will lead us to the base
4273       // object. We don't have to worry about the case where the
4274       // object address is somehow being computed by the multiply,
4275       // because our callers only care when the result is an
4276       // identifiable object.
4277       if (U->getOpcode() != Instruction::Add ||
4278           (!isa<ConstantInt>(U->getOperand(1)) &&
4279            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4280            !isa<PHINode>(U->getOperand(1))))
4281         return V;
4282       V = U->getOperand(0);
4283     } else {
4284       return V;
4285     }
4286     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4287   } while (true);
4288 }
4289 
4290 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4291 /// ptrtoint+arithmetic+inttoptr sequences.
4292 /// It returns false if unidentified object is found in getUnderlyingObjects.
4293 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4294                                           SmallVectorImpl<Value *> &Objects) {
4295   SmallPtrSet<const Value *, 16> Visited;
4296   SmallVector<const Value *, 4> Working(1, V);
4297   do {
4298     V = Working.pop_back_val();
4299 
4300     SmallVector<const Value *, 4> Objs;
4301     getUnderlyingObjects(V, Objs);
4302 
4303     for (const Value *V : Objs) {
4304       if (!Visited.insert(V).second)
4305         continue;
4306       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4307         const Value *O =
4308           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4309         if (O->getType()->isPointerTy()) {
4310           Working.push_back(O);
4311           continue;
4312         }
4313       }
4314       // If getUnderlyingObjects fails to find an identifiable object,
4315       // getUnderlyingObjectsForCodeGen also fails for safety.
4316       if (!isIdentifiedObject(V)) {
4317         Objects.clear();
4318         return false;
4319       }
4320       Objects.push_back(const_cast<Value *>(V));
4321     }
4322   } while (!Working.empty());
4323   return true;
4324 }
4325 
4326 static AllocaInst *
4327 findAllocaForValue(Value *V, DenseMap<Value *, AllocaInst *> &AllocaForValue) {
4328   if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
4329     return AI;
4330   // See if we've already calculated (or started to calculate) alloca for a
4331   // given value.
4332   auto I = AllocaForValue.find(V);
4333   if (I != AllocaForValue.end())
4334     return I->second;
4335   // Store 0 while we're calculating alloca for value V to avoid
4336   // infinite recursion if the value references itself.
4337   AllocaForValue[V] = nullptr;
4338   AllocaInst *Res = nullptr;
4339   if (CastInst *CI = dyn_cast<CastInst>(V))
4340     Res = findAllocaForValue(CI->getOperand(0), AllocaForValue);
4341   else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4342     for (Value *IncValue : PN->incoming_values()) {
4343       // Allow self-referencing phi-nodes.
4344       if (IncValue == PN)
4345         continue;
4346       AllocaInst *IncValueAI = findAllocaForValue(IncValue, AllocaForValue);
4347       // AI for incoming values should exist and should all be equal.
4348       if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
4349         return nullptr;
4350       Res = IncValueAI;
4351     }
4352   } else if (GetElementPtrInst *EP = dyn_cast<GetElementPtrInst>(V)) {
4353     Res = findAllocaForValue(EP->getPointerOperand(), AllocaForValue);
4354   }
4355   if (Res)
4356     AllocaForValue[V] = Res;
4357   return Res;
4358 }
4359 
4360 AllocaInst *llvm::findAllocaForValue(Value *V) {
4361   DenseMap<Value *, AllocaInst *> AllocaForValue;
4362   return ::findAllocaForValue(V, AllocaForValue);
4363 }
4364 
4365 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4366     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4367   for (const User *U : V->users()) {
4368     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4369     if (!II)
4370       return false;
4371 
4372     if (AllowLifetime && II->isLifetimeStartOrEnd())
4373       continue;
4374 
4375     if (AllowDroppable && II->isDroppable())
4376       continue;
4377 
4378     return false;
4379   }
4380   return true;
4381 }
4382 
4383 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4384   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4385       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4386 }
4387 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4388   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4389       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4390 }
4391 
4392 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4393   if (!LI.isUnordered())
4394     return true;
4395   const Function &F = *LI.getFunction();
4396   // Speculative load may create a race that did not exist in the source.
4397   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4398     // Speculative load may load data from dirty regions.
4399     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4400     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4401 }
4402 
4403 
4404 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4405                                         const Instruction *CtxI,
4406                                         const DominatorTree *DT) {
4407   const Operator *Inst = dyn_cast<Operator>(V);
4408   if (!Inst)
4409     return false;
4410 
4411   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4412     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4413       if (C->canTrap())
4414         return false;
4415 
4416   switch (Inst->getOpcode()) {
4417   default:
4418     return true;
4419   case Instruction::UDiv:
4420   case Instruction::URem: {
4421     // x / y is undefined if y == 0.
4422     const APInt *V;
4423     if (match(Inst->getOperand(1), m_APInt(V)))
4424       return *V != 0;
4425     return false;
4426   }
4427   case Instruction::SDiv:
4428   case Instruction::SRem: {
4429     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4430     const APInt *Numerator, *Denominator;
4431     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4432       return false;
4433     // We cannot hoist this division if the denominator is 0.
4434     if (*Denominator == 0)
4435       return false;
4436     // It's safe to hoist if the denominator is not 0 or -1.
4437     if (*Denominator != -1)
4438       return true;
4439     // At this point we know that the denominator is -1.  It is safe to hoist as
4440     // long we know that the numerator is not INT_MIN.
4441     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4442       return !Numerator->isMinSignedValue();
4443     // The numerator *might* be MinSignedValue.
4444     return false;
4445   }
4446   case Instruction::Load: {
4447     const LoadInst *LI = cast<LoadInst>(Inst);
4448     if (mustSuppressSpeculation(*LI))
4449       return false;
4450     const DataLayout &DL = LI->getModule()->getDataLayout();
4451     return isDereferenceableAndAlignedPointer(
4452         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4453         DL, CtxI, DT);
4454   }
4455   case Instruction::Call: {
4456     auto *CI = cast<const CallInst>(Inst);
4457     const Function *Callee = CI->getCalledFunction();
4458 
4459     // The called function could have undefined behavior or side-effects, even
4460     // if marked readnone nounwind.
4461     return Callee && Callee->isSpeculatable();
4462   }
4463   case Instruction::VAArg:
4464   case Instruction::Alloca:
4465   case Instruction::Invoke:
4466   case Instruction::CallBr:
4467   case Instruction::PHI:
4468   case Instruction::Store:
4469   case Instruction::Ret:
4470   case Instruction::Br:
4471   case Instruction::IndirectBr:
4472   case Instruction::Switch:
4473   case Instruction::Unreachable:
4474   case Instruction::Fence:
4475   case Instruction::AtomicRMW:
4476   case Instruction::AtomicCmpXchg:
4477   case Instruction::LandingPad:
4478   case Instruction::Resume:
4479   case Instruction::CatchSwitch:
4480   case Instruction::CatchPad:
4481   case Instruction::CatchRet:
4482   case Instruction::CleanupPad:
4483   case Instruction::CleanupRet:
4484     return false; // Misc instructions which have effects
4485   }
4486 }
4487 
4488 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4489   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4490 }
4491 
4492 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4493 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4494   switch (OR) {
4495     case ConstantRange::OverflowResult::MayOverflow:
4496       return OverflowResult::MayOverflow;
4497     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4498       return OverflowResult::AlwaysOverflowsLow;
4499     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4500       return OverflowResult::AlwaysOverflowsHigh;
4501     case ConstantRange::OverflowResult::NeverOverflows:
4502       return OverflowResult::NeverOverflows;
4503   }
4504   llvm_unreachable("Unknown OverflowResult");
4505 }
4506 
4507 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4508 static ConstantRange computeConstantRangeIncludingKnownBits(
4509     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4510     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4511     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4512   KnownBits Known = computeKnownBits(
4513       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4514   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4515   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4516   ConstantRange::PreferredRangeType RangeType =
4517       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4518   return CR1.intersectWith(CR2, RangeType);
4519 }
4520 
4521 OverflowResult llvm::computeOverflowForUnsignedMul(
4522     const Value *LHS, const Value *RHS, const DataLayout &DL,
4523     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4524     bool UseInstrInfo) {
4525   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4526                                         nullptr, UseInstrInfo);
4527   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4528                                         nullptr, UseInstrInfo);
4529   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4530   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4531   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4532 }
4533 
4534 OverflowResult
4535 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4536                                   const DataLayout &DL, AssumptionCache *AC,
4537                                   const Instruction *CxtI,
4538                                   const DominatorTree *DT, bool UseInstrInfo) {
4539   // Multiplying n * m significant bits yields a result of n + m significant
4540   // bits. If the total number of significant bits does not exceed the
4541   // result bit width (minus 1), there is no overflow.
4542   // This means if we have enough leading sign bits in the operands
4543   // we can guarantee that the result does not overflow.
4544   // Ref: "Hacker's Delight" by Henry Warren
4545   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4546 
4547   // Note that underestimating the number of sign bits gives a more
4548   // conservative answer.
4549   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4550                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4551 
4552   // First handle the easy case: if we have enough sign bits there's
4553   // definitely no overflow.
4554   if (SignBits > BitWidth + 1)
4555     return OverflowResult::NeverOverflows;
4556 
4557   // There are two ambiguous cases where there can be no overflow:
4558   //   SignBits == BitWidth + 1    and
4559   //   SignBits == BitWidth
4560   // The second case is difficult to check, therefore we only handle the
4561   // first case.
4562   if (SignBits == BitWidth + 1) {
4563     // It overflows only when both arguments are negative and the true
4564     // product is exactly the minimum negative number.
4565     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4566     // For simplicity we just check if at least one side is not negative.
4567     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4568                                           nullptr, UseInstrInfo);
4569     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4570                                           nullptr, UseInstrInfo);
4571     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4572       return OverflowResult::NeverOverflows;
4573   }
4574   return OverflowResult::MayOverflow;
4575 }
4576 
4577 OverflowResult llvm::computeOverflowForUnsignedAdd(
4578     const Value *LHS, const Value *RHS, const DataLayout &DL,
4579     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4580     bool UseInstrInfo) {
4581   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4582       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4583       nullptr, UseInstrInfo);
4584   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4585       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4586       nullptr, UseInstrInfo);
4587   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4588 }
4589 
4590 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4591                                                   const Value *RHS,
4592                                                   const AddOperator *Add,
4593                                                   const DataLayout &DL,
4594                                                   AssumptionCache *AC,
4595                                                   const Instruction *CxtI,
4596                                                   const DominatorTree *DT) {
4597   if (Add && Add->hasNoSignedWrap()) {
4598     return OverflowResult::NeverOverflows;
4599   }
4600 
4601   // If LHS and RHS each have at least two sign bits, the addition will look
4602   // like
4603   //
4604   // XX..... +
4605   // YY.....
4606   //
4607   // If the carry into the most significant position is 0, X and Y can't both
4608   // be 1 and therefore the carry out of the addition is also 0.
4609   //
4610   // If the carry into the most significant position is 1, X and Y can't both
4611   // be 0 and therefore the carry out of the addition is also 1.
4612   //
4613   // Since the carry into the most significant position is always equal to
4614   // the carry out of the addition, there is no signed overflow.
4615   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4616       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4617     return OverflowResult::NeverOverflows;
4618 
4619   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4620       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4621   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4622       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4623   OverflowResult OR =
4624       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4625   if (OR != OverflowResult::MayOverflow)
4626     return OR;
4627 
4628   // The remaining code needs Add to be available. Early returns if not so.
4629   if (!Add)
4630     return OverflowResult::MayOverflow;
4631 
4632   // If the sign of Add is the same as at least one of the operands, this add
4633   // CANNOT overflow. If this can be determined from the known bits of the
4634   // operands the above signedAddMayOverflow() check will have already done so.
4635   // The only other way to improve on the known bits is from an assumption, so
4636   // call computeKnownBitsFromAssume() directly.
4637   bool LHSOrRHSKnownNonNegative =
4638       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4639   bool LHSOrRHSKnownNegative =
4640       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4641   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4642     KnownBits AddKnown(LHSRange.getBitWidth());
4643     computeKnownBitsFromAssume(
4644         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4645     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4646         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4647       return OverflowResult::NeverOverflows;
4648   }
4649 
4650   return OverflowResult::MayOverflow;
4651 }
4652 
4653 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4654                                                    const Value *RHS,
4655                                                    const DataLayout &DL,
4656                                                    AssumptionCache *AC,
4657                                                    const Instruction *CxtI,
4658                                                    const DominatorTree *DT) {
4659   // Checking for conditions implied by dominating conditions may be expensive.
4660   // Limit it to usub_with_overflow calls for now.
4661   if (match(CxtI,
4662             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4663     if (auto C =
4664             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4665       if (*C)
4666         return OverflowResult::NeverOverflows;
4667       return OverflowResult::AlwaysOverflowsLow;
4668     }
4669   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4670       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4671   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4672       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4673   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4674 }
4675 
4676 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4677                                                  const Value *RHS,
4678                                                  const DataLayout &DL,
4679                                                  AssumptionCache *AC,
4680                                                  const Instruction *CxtI,
4681                                                  const DominatorTree *DT) {
4682   // If LHS and RHS each have at least two sign bits, the subtraction
4683   // cannot overflow.
4684   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4685       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4686     return OverflowResult::NeverOverflows;
4687 
4688   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4689       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4690   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4691       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4692   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4693 }
4694 
4695 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4696                                      const DominatorTree &DT) {
4697   SmallVector<const BranchInst *, 2> GuardingBranches;
4698   SmallVector<const ExtractValueInst *, 2> Results;
4699 
4700   for (const User *U : WO->users()) {
4701     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4702       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4703 
4704       if (EVI->getIndices()[0] == 0)
4705         Results.push_back(EVI);
4706       else {
4707         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4708 
4709         for (const auto *U : EVI->users())
4710           if (const auto *B = dyn_cast<BranchInst>(U)) {
4711             assert(B->isConditional() && "How else is it using an i1?");
4712             GuardingBranches.push_back(B);
4713           }
4714       }
4715     } else {
4716       // We are using the aggregate directly in a way we don't want to analyze
4717       // here (storing it to a global, say).
4718       return false;
4719     }
4720   }
4721 
4722   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4723     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4724     if (!NoWrapEdge.isSingleEdge())
4725       return false;
4726 
4727     // Check if all users of the add are provably no-wrap.
4728     for (const auto *Result : Results) {
4729       // If the extractvalue itself is not executed on overflow, the we don't
4730       // need to check each use separately, since domination is transitive.
4731       if (DT.dominates(NoWrapEdge, Result->getParent()))
4732         continue;
4733 
4734       for (auto &RU : Result->uses())
4735         if (!DT.dominates(NoWrapEdge, RU))
4736           return false;
4737     }
4738 
4739     return true;
4740   };
4741 
4742   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4743 }
4744 
4745 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4746   // See whether I has flags that may create poison
4747   if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4748     if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4749       return true;
4750   }
4751   if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4752     if (ExactOp->isExact())
4753       return true;
4754   if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4755     auto FMF = FP->getFastMathFlags();
4756     if (FMF.noNaNs() || FMF.noInfs())
4757       return true;
4758   }
4759 
4760   unsigned Opcode = Op->getOpcode();
4761 
4762   // Check whether opcode is a poison/undef-generating operation
4763   switch (Opcode) {
4764   case Instruction::Shl:
4765   case Instruction::AShr:
4766   case Instruction::LShr: {
4767     // Shifts return poison if shiftwidth is larger than the bitwidth.
4768     if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4769       SmallVector<Constant *, 4> ShiftAmounts;
4770       if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4771         unsigned NumElts = FVTy->getNumElements();
4772         for (unsigned i = 0; i < NumElts; ++i)
4773           ShiftAmounts.push_back(C->getAggregateElement(i));
4774       } else if (isa<ScalableVectorType>(C->getType()))
4775         return true; // Can't tell, just return true to be safe
4776       else
4777         ShiftAmounts.push_back(C);
4778 
4779       bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4780         auto *CI = dyn_cast<ConstantInt>(C);
4781         return CI && CI->getZExtValue() < C->getType()->getIntegerBitWidth();
4782       });
4783       return !Safe;
4784     }
4785     return true;
4786   }
4787   case Instruction::FPToSI:
4788   case Instruction::FPToUI:
4789     // fptosi/ui yields poison if the resulting value does not fit in the
4790     // destination type.
4791     return true;
4792   case Instruction::Call:
4793   case Instruction::CallBr:
4794   case Instruction::Invoke: {
4795     const auto *CB = cast<CallBase>(Op);
4796     return !CB->hasRetAttr(Attribute::NoUndef);
4797   }
4798   case Instruction::InsertElement:
4799   case Instruction::ExtractElement: {
4800     // If index exceeds the length of the vector, it returns poison
4801     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4802     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4803     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4804     if (!Idx || Idx->getZExtValue() >= VTy->getElementCount().Min)
4805       return true;
4806     return false;
4807   }
4808   case Instruction::ShuffleVector: {
4809     // shufflevector may return undef.
4810     if (PoisonOnly)
4811       return false;
4812     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4813                              ? cast<ConstantExpr>(Op)->getShuffleMask()
4814                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4815     return any_of(Mask, [](int Elt) { return Elt == UndefMaskElem; });
4816   }
4817   case Instruction::FNeg:
4818   case Instruction::PHI:
4819   case Instruction::Select:
4820   case Instruction::URem:
4821   case Instruction::SRem:
4822   case Instruction::ExtractValue:
4823   case Instruction::InsertValue:
4824   case Instruction::Freeze:
4825   case Instruction::ICmp:
4826   case Instruction::FCmp:
4827     return false;
4828   case Instruction::GetElementPtr: {
4829     const auto *GEP = cast<GEPOperator>(Op);
4830     return GEP->isInBounds();
4831   }
4832   default: {
4833     const auto *CE = dyn_cast<ConstantExpr>(Op);
4834     if (isa<CastInst>(Op) || (CE && CE->isCast()))
4835       return false;
4836     else if (Instruction::isBinaryOp(Opcode))
4837       return false;
4838     // Be conservative and return true.
4839     return true;
4840   }
4841   }
4842 }
4843 
4844 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
4845   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
4846 }
4847 
4848 bool llvm::canCreatePoison(const Operator *Op) {
4849   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
4850 }
4851 
4852 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V,
4853                                             const Instruction *CtxI,
4854                                             const DominatorTree *DT,
4855                                             unsigned Depth) {
4856   if (Depth >= MaxDepth)
4857     return false;
4858 
4859   if (const auto *A = dyn_cast<Argument>(V)) {
4860     if (A->hasAttribute(Attribute::NoUndef))
4861       return true;
4862   }
4863 
4864   if (auto *C = dyn_cast<Constant>(V)) {
4865     if (isa<UndefValue>(C))
4866       return false;
4867 
4868     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
4869         isa<ConstantPointerNull>(C) || isa<Function>(C))
4870       return true;
4871 
4872     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
4873       return !C->containsConstantExpression() && !C->containsUndefElement();
4874   }
4875 
4876   // Strip cast operations from a pointer value.
4877   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
4878   // inbounds with zero offset. To guarantee that the result isn't poison, the
4879   // stripped pointer is checked as it has to be pointing into an allocated
4880   // object or be null `null` to ensure `inbounds` getelement pointers with a
4881   // zero offset could not produce poison.
4882   // It can strip off addrspacecast that do not change bit representation as
4883   // well. We believe that such addrspacecast is equivalent to no-op.
4884   auto *StrippedV = V->stripPointerCastsSameRepresentation();
4885   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
4886       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
4887     return true;
4888 
4889   auto OpCheck = [&](const Value *V) {
4890     return isGuaranteedNotToBeUndefOrPoison(V, CtxI, DT, Depth + 1);
4891   };
4892 
4893   if (auto *Opr = dyn_cast<Operator>(V)) {
4894     // If the value is a freeze instruction, then it can never
4895     // be undef or poison.
4896     if (isa<FreezeInst>(V))
4897       return true;
4898 
4899     if (const auto *CB = dyn_cast<CallBase>(V)) {
4900       if (CB->hasRetAttr(Attribute::NoUndef))
4901         return true;
4902     }
4903 
4904     if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
4905       return true;
4906   }
4907 
4908   if (auto *I = dyn_cast<Instruction>(V)) {
4909     if (programUndefinedIfPoison(I) && I->getType()->isIntegerTy(1))
4910       // Note: once we have an agreement that poison is a value-wise concept,
4911       // we can remove the isIntegerTy(1) constraint.
4912       return true;
4913   }
4914 
4915   // CxtI may be null or a cloned instruction.
4916   if (!CtxI || !CtxI->getParent() || !DT)
4917     return false;
4918 
4919   auto *DNode = DT->getNode(CtxI->getParent());
4920   if (!DNode)
4921     // Unreachable block
4922     return false;
4923 
4924   // If V is used as a branch condition before reaching CtxI, V cannot be
4925   // undef or poison.
4926   //   br V, BB1, BB2
4927   // BB1:
4928   //   CtxI ; V cannot be undef or poison here
4929   auto *Dominator = DNode->getIDom();
4930   while (Dominator) {
4931     auto *TI = Dominator->getBlock()->getTerminator();
4932 
4933     if (auto BI = dyn_cast<BranchInst>(TI)) {
4934       if (BI->isConditional() && BI->getCondition() == V)
4935         return true;
4936     } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
4937       if (SI->getCondition() == V)
4938         return true;
4939     }
4940 
4941     Dominator = Dominator->getIDom();
4942   }
4943 
4944   return false;
4945 }
4946 
4947 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4948                                                  const DataLayout &DL,
4949                                                  AssumptionCache *AC,
4950                                                  const Instruction *CxtI,
4951                                                  const DominatorTree *DT) {
4952   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4953                                        Add, DL, AC, CxtI, DT);
4954 }
4955 
4956 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4957                                                  const Value *RHS,
4958                                                  const DataLayout &DL,
4959                                                  AssumptionCache *AC,
4960                                                  const Instruction *CxtI,
4961                                                  const DominatorTree *DT) {
4962   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4963 }
4964 
4965 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4966   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
4967   // of time because it's possible for another thread to interfere with it for an
4968   // arbitrary length of time, but programs aren't allowed to rely on that.
4969 
4970   // If there is no successor, then execution can't transfer to it.
4971   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4972     return !CRI->unwindsToCaller();
4973   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4974     return !CatchSwitch->unwindsToCaller();
4975   if (isa<ResumeInst>(I))
4976     return false;
4977   if (isa<ReturnInst>(I))
4978     return false;
4979   if (isa<UnreachableInst>(I))
4980     return false;
4981 
4982   // Calls can throw, or contain an infinite loop, or kill the process.
4983   if (const auto *CB = dyn_cast<CallBase>(I)) {
4984     // Call sites that throw have implicit non-local control flow.
4985     if (!CB->doesNotThrow())
4986       return false;
4987 
4988     // A function which doens't throw and has "willreturn" attribute will
4989     // always return.
4990     if (CB->hasFnAttr(Attribute::WillReturn))
4991       return true;
4992 
4993     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4994     // etc. and thus not return.  However, LLVM already assumes that
4995     //
4996     //  - Thread exiting actions are modeled as writes to memory invisible to
4997     //    the program.
4998     //
4999     //  - Loops that don't have side effects (side effects are volatile/atomic
5000     //    stores and IO) always terminate (see http://llvm.org/PR965).
5001     //    Furthermore IO itself is also modeled as writes to memory invisible to
5002     //    the program.
5003     //
5004     // We rely on those assumptions here, and use the memory effects of the call
5005     // target as a proxy for checking that it always returns.
5006 
5007     // FIXME: This isn't aggressive enough; a call which only writes to a global
5008     // is guaranteed to return.
5009     return CB->onlyReadsMemory() || CB->onlyAccessesArgMemory();
5010   }
5011 
5012   // Other instructions return normally.
5013   return true;
5014 }
5015 
5016 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5017   // TODO: This is slightly conservative for invoke instruction since exiting
5018   // via an exception *is* normal control for them.
5019   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
5020     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
5021       return false;
5022   return true;
5023 }
5024 
5025 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5026                                                   const Loop *L) {
5027   // The loop header is guaranteed to be executed for every iteration.
5028   //
5029   // FIXME: Relax this constraint to cover all basic blocks that are
5030   // guaranteed to be executed at every iteration.
5031   if (I->getParent() != L->getHeader()) return false;
5032 
5033   for (const Instruction &LI : *L->getHeader()) {
5034     if (&LI == I) return true;
5035     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5036   }
5037   llvm_unreachable("Instruction not contained in its own parent basic block.");
5038 }
5039 
5040 bool llvm::propagatesPoison(const Instruction *I) {
5041   switch (I->getOpcode()) {
5042   case Instruction::Freeze:
5043   case Instruction::Select:
5044   case Instruction::PHI:
5045   case Instruction::Call:
5046   case Instruction::Invoke:
5047     return false;
5048   case Instruction::ICmp:
5049   case Instruction::FCmp:
5050   case Instruction::GetElementPtr:
5051     return true;
5052   default:
5053     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5054       return true;
5055 
5056     // Be conservative and return false.
5057     return false;
5058   }
5059 }
5060 
5061 const Value *llvm::getGuaranteedNonPoisonOp(const Instruction *I) {
5062   switch (I->getOpcode()) {
5063     case Instruction::Store:
5064       return cast<StoreInst>(I)->getPointerOperand();
5065 
5066     case Instruction::Load:
5067       return cast<LoadInst>(I)->getPointerOperand();
5068 
5069     case Instruction::AtomicCmpXchg:
5070       return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
5071 
5072     case Instruction::AtomicRMW:
5073       return cast<AtomicRMWInst>(I)->getPointerOperand();
5074 
5075     case Instruction::UDiv:
5076     case Instruction::SDiv:
5077     case Instruction::URem:
5078     case Instruction::SRem:
5079       return I->getOperand(1);
5080 
5081     case Instruction::Call:
5082       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5083         switch (II->getIntrinsicID()) {
5084         case Intrinsic::assume:
5085           return II->getArgOperand(0);
5086         default:
5087           return nullptr;
5088         }
5089       }
5090       return nullptr;
5091 
5092     default:
5093       return nullptr;
5094   }
5095 }
5096 
5097 bool llvm::mustTriggerUB(const Instruction *I,
5098                          const SmallSet<const Value *, 16>& KnownPoison) {
5099   auto *NotPoison = getGuaranteedNonPoisonOp(I);
5100   return (NotPoison && KnownPoison.count(NotPoison));
5101 }
5102 
5103 
5104 bool llvm::programUndefinedIfPoison(const Instruction *PoisonI) {
5105   // We currently only look for uses of poison values within the same basic
5106   // block, as that makes it easier to guarantee that the uses will be
5107   // executed given that PoisonI is executed.
5108   //
5109   // FIXME: Expand this to consider uses beyond the same basic block. To do
5110   // this, look out for the distinction between post-dominance and strong
5111   // post-dominance.
5112   const BasicBlock *BB = PoisonI->getParent();
5113 
5114   // Set of instructions that we have proved will yield poison if PoisonI
5115   // does.
5116   SmallSet<const Value *, 16> YieldsPoison;
5117   SmallSet<const BasicBlock *, 4> Visited;
5118   YieldsPoison.insert(PoisonI);
5119   Visited.insert(PoisonI->getParent());
5120 
5121   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
5122 
5123   unsigned Iter = 0;
5124   while (Iter++ < MaxDepth) {
5125     for (auto &I : make_range(Begin, End)) {
5126       if (&I != PoisonI) {
5127         if (mustTriggerUB(&I, YieldsPoison))
5128           return true;
5129         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5130           return false;
5131       }
5132 
5133       // Mark poison that propagates from I through uses of I.
5134       if (YieldsPoison.count(&I)) {
5135         for (const User *User : I.users()) {
5136           const Instruction *UserI = cast<Instruction>(User);
5137           if (propagatesPoison(UserI))
5138             YieldsPoison.insert(User);
5139         }
5140       }
5141     }
5142 
5143     if (auto *NextBB = BB->getSingleSuccessor()) {
5144       if (Visited.insert(NextBB).second) {
5145         BB = NextBB;
5146         Begin = BB->getFirstNonPHI()->getIterator();
5147         End = BB->end();
5148         continue;
5149       }
5150     }
5151 
5152     break;
5153   }
5154   return false;
5155 }
5156 
5157 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5158   if (FMF.noNaNs())
5159     return true;
5160 
5161   if (auto *C = dyn_cast<ConstantFP>(V))
5162     return !C->isNaN();
5163 
5164   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5165     if (!C->getElementType()->isFloatingPointTy())
5166       return false;
5167     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5168       if (C->getElementAsAPFloat(I).isNaN())
5169         return false;
5170     }
5171     return true;
5172   }
5173 
5174   if (isa<ConstantAggregateZero>(V))
5175     return true;
5176 
5177   return false;
5178 }
5179 
5180 static bool isKnownNonZero(const Value *V) {
5181   if (auto *C = dyn_cast<ConstantFP>(V))
5182     return !C->isZero();
5183 
5184   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5185     if (!C->getElementType()->isFloatingPointTy())
5186       return false;
5187     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5188       if (C->getElementAsAPFloat(I).isZero())
5189         return false;
5190     }
5191     return true;
5192   }
5193 
5194   return false;
5195 }
5196 
5197 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5198 /// Given non-min/max outer cmp/select from the clamp pattern this
5199 /// function recognizes if it can be substitued by a "canonical" min/max
5200 /// pattern.
5201 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5202                                                Value *CmpLHS, Value *CmpRHS,
5203                                                Value *TrueVal, Value *FalseVal,
5204                                                Value *&LHS, Value *&RHS) {
5205   // Try to match
5206   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5207   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5208   // and return description of the outer Max/Min.
5209 
5210   // First, check if select has inverse order:
5211   if (CmpRHS == FalseVal) {
5212     std::swap(TrueVal, FalseVal);
5213     Pred = CmpInst::getInversePredicate(Pred);
5214   }
5215 
5216   // Assume success now. If there's no match, callers should not use these anyway.
5217   LHS = TrueVal;
5218   RHS = FalseVal;
5219 
5220   const APFloat *FC1;
5221   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5222     return {SPF_UNKNOWN, SPNB_NA, false};
5223 
5224   const APFloat *FC2;
5225   switch (Pred) {
5226   case CmpInst::FCMP_OLT:
5227   case CmpInst::FCMP_OLE:
5228   case CmpInst::FCMP_ULT:
5229   case CmpInst::FCMP_ULE:
5230     if (match(FalseVal,
5231               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5232                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5233         *FC1 < *FC2)
5234       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5235     break;
5236   case CmpInst::FCMP_OGT:
5237   case CmpInst::FCMP_OGE:
5238   case CmpInst::FCMP_UGT:
5239   case CmpInst::FCMP_UGE:
5240     if (match(FalseVal,
5241               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5242                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5243         *FC1 > *FC2)
5244       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5245     break;
5246   default:
5247     break;
5248   }
5249 
5250   return {SPF_UNKNOWN, SPNB_NA, false};
5251 }
5252 
5253 /// Recognize variations of:
5254 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5255 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5256                                       Value *CmpLHS, Value *CmpRHS,
5257                                       Value *TrueVal, Value *FalseVal) {
5258   // Swap the select operands and predicate to match the patterns below.
5259   if (CmpRHS != TrueVal) {
5260     Pred = ICmpInst::getSwappedPredicate(Pred);
5261     std::swap(TrueVal, FalseVal);
5262   }
5263   const APInt *C1;
5264   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5265     const APInt *C2;
5266     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5267     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5268         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5269       return {SPF_SMAX, SPNB_NA, false};
5270 
5271     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5272     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5273         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5274       return {SPF_SMIN, SPNB_NA, false};
5275 
5276     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5277     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5278         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5279       return {SPF_UMAX, SPNB_NA, false};
5280 
5281     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5282     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5283         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5284       return {SPF_UMIN, SPNB_NA, false};
5285   }
5286   return {SPF_UNKNOWN, SPNB_NA, false};
5287 }
5288 
5289 /// Recognize variations of:
5290 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5291 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5292                                                Value *CmpLHS, Value *CmpRHS,
5293                                                Value *TVal, Value *FVal,
5294                                                unsigned Depth) {
5295   // TODO: Allow FP min/max with nnan/nsz.
5296   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5297 
5298   Value *A = nullptr, *B = nullptr;
5299   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5300   if (!SelectPatternResult::isMinOrMax(L.Flavor))
5301     return {SPF_UNKNOWN, SPNB_NA, false};
5302 
5303   Value *C = nullptr, *D = nullptr;
5304   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5305   if (L.Flavor != R.Flavor)
5306     return {SPF_UNKNOWN, SPNB_NA, false};
5307 
5308   // We have something like: x Pred y ? min(a, b) : min(c, d).
5309   // Try to match the compare to the min/max operations of the select operands.
5310   // First, make sure we have the right compare predicate.
5311   switch (L.Flavor) {
5312   case SPF_SMIN:
5313     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5314       Pred = ICmpInst::getSwappedPredicate(Pred);
5315       std::swap(CmpLHS, CmpRHS);
5316     }
5317     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5318       break;
5319     return {SPF_UNKNOWN, SPNB_NA, false};
5320   case SPF_SMAX:
5321     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5322       Pred = ICmpInst::getSwappedPredicate(Pred);
5323       std::swap(CmpLHS, CmpRHS);
5324     }
5325     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5326       break;
5327     return {SPF_UNKNOWN, SPNB_NA, false};
5328   case SPF_UMIN:
5329     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5330       Pred = ICmpInst::getSwappedPredicate(Pred);
5331       std::swap(CmpLHS, CmpRHS);
5332     }
5333     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5334       break;
5335     return {SPF_UNKNOWN, SPNB_NA, false};
5336   case SPF_UMAX:
5337     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5338       Pred = ICmpInst::getSwappedPredicate(Pred);
5339       std::swap(CmpLHS, CmpRHS);
5340     }
5341     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5342       break;
5343     return {SPF_UNKNOWN, SPNB_NA, false};
5344   default:
5345     return {SPF_UNKNOWN, SPNB_NA, false};
5346   }
5347 
5348   // If there is a common operand in the already matched min/max and the other
5349   // min/max operands match the compare operands (either directly or inverted),
5350   // then this is min/max of the same flavor.
5351 
5352   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5353   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5354   if (D == B) {
5355     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5356                                          match(A, m_Not(m_Specific(CmpRHS)))))
5357       return {L.Flavor, SPNB_NA, false};
5358   }
5359   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5360   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5361   if (C == B) {
5362     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5363                                          match(A, m_Not(m_Specific(CmpRHS)))))
5364       return {L.Flavor, SPNB_NA, false};
5365   }
5366   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5367   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5368   if (D == A) {
5369     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5370                                          match(B, m_Not(m_Specific(CmpRHS)))))
5371       return {L.Flavor, SPNB_NA, false};
5372   }
5373   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5374   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5375   if (C == A) {
5376     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5377                                          match(B, m_Not(m_Specific(CmpRHS)))))
5378       return {L.Flavor, SPNB_NA, false};
5379   }
5380 
5381   return {SPF_UNKNOWN, SPNB_NA, false};
5382 }
5383 
5384 /// If the input value is the result of a 'not' op, constant integer, or vector
5385 /// splat of a constant integer, return the bitwise-not source value.
5386 /// TODO: This could be extended to handle non-splat vector integer constants.
5387 static Value *getNotValue(Value *V) {
5388   Value *NotV;
5389   if (match(V, m_Not(m_Value(NotV))))
5390     return NotV;
5391 
5392   const APInt *C;
5393   if (match(V, m_APInt(C)))
5394     return ConstantInt::get(V->getType(), ~(*C));
5395 
5396   return nullptr;
5397 }
5398 
5399 /// Match non-obvious integer minimum and maximum sequences.
5400 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5401                                        Value *CmpLHS, Value *CmpRHS,
5402                                        Value *TrueVal, Value *FalseVal,
5403                                        Value *&LHS, Value *&RHS,
5404                                        unsigned Depth) {
5405   // Assume success. If there's no match, callers should not use these anyway.
5406   LHS = TrueVal;
5407   RHS = FalseVal;
5408 
5409   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5410   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5411     return SPR;
5412 
5413   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5414   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5415     return SPR;
5416 
5417   // Look through 'not' ops to find disguised min/max.
5418   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5419   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5420   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5421     switch (Pred) {
5422     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5423     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5424     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5425     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5426     default: break;
5427     }
5428   }
5429 
5430   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5431   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5432   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5433     switch (Pred) {
5434     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5435     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5436     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5437     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5438     default: break;
5439     }
5440   }
5441 
5442   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5443     return {SPF_UNKNOWN, SPNB_NA, false};
5444 
5445   // Z = X -nsw Y
5446   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5447   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5448   if (match(TrueVal, m_Zero()) &&
5449       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5450     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5451 
5452   // Z = X -nsw Y
5453   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5454   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5455   if (match(FalseVal, m_Zero()) &&
5456       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5457     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5458 
5459   const APInt *C1;
5460   if (!match(CmpRHS, m_APInt(C1)))
5461     return {SPF_UNKNOWN, SPNB_NA, false};
5462 
5463   // An unsigned min/max can be written with a signed compare.
5464   const APInt *C2;
5465   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5466       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5467     // Is the sign bit set?
5468     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5469     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5470     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5471         C2->isMaxSignedValue())
5472       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5473 
5474     // Is the sign bit clear?
5475     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5476     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5477     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5478         C2->isMinSignedValue())
5479       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5480   }
5481 
5482   return {SPF_UNKNOWN, SPNB_NA, false};
5483 }
5484 
5485 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5486   assert(X && Y && "Invalid operand");
5487 
5488   // X = sub (0, Y) || X = sub nsw (0, Y)
5489   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5490       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5491     return true;
5492 
5493   // Y = sub (0, X) || Y = sub nsw (0, X)
5494   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5495       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5496     return true;
5497 
5498   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5499   Value *A, *B;
5500   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5501                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5502          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5503                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5504 }
5505 
5506 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5507                                               FastMathFlags FMF,
5508                                               Value *CmpLHS, Value *CmpRHS,
5509                                               Value *TrueVal, Value *FalseVal,
5510                                               Value *&LHS, Value *&RHS,
5511                                               unsigned Depth) {
5512   if (CmpInst::isFPPredicate(Pred)) {
5513     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5514     // 0.0 operand, set the compare's 0.0 operands to that same value for the
5515     // purpose of identifying min/max. Disregard vector constants with undefined
5516     // elements because those can not be back-propagated for analysis.
5517     Value *OutputZeroVal = nullptr;
5518     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5519         !cast<Constant>(TrueVal)->containsUndefElement())
5520       OutputZeroVal = TrueVal;
5521     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5522              !cast<Constant>(FalseVal)->containsUndefElement())
5523       OutputZeroVal = FalseVal;
5524 
5525     if (OutputZeroVal) {
5526       if (match(CmpLHS, m_AnyZeroFP()))
5527         CmpLHS = OutputZeroVal;
5528       if (match(CmpRHS, m_AnyZeroFP()))
5529         CmpRHS = OutputZeroVal;
5530     }
5531   }
5532 
5533   LHS = CmpLHS;
5534   RHS = CmpRHS;
5535 
5536   // Signed zero may return inconsistent results between implementations.
5537   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5538   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5539   // Therefore, we behave conservatively and only proceed if at least one of the
5540   // operands is known to not be zero or if we don't care about signed zero.
5541   switch (Pred) {
5542   default: break;
5543   // FIXME: Include OGT/OLT/UGT/ULT.
5544   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5545   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5546     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5547         !isKnownNonZero(CmpRHS))
5548       return {SPF_UNKNOWN, SPNB_NA, false};
5549   }
5550 
5551   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5552   bool Ordered = false;
5553 
5554   // When given one NaN and one non-NaN input:
5555   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5556   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5557   //     ordered comparison fails), which could be NaN or non-NaN.
5558   // so here we discover exactly what NaN behavior is required/accepted.
5559   if (CmpInst::isFPPredicate(Pred)) {
5560     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5561     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5562 
5563     if (LHSSafe && RHSSafe) {
5564       // Both operands are known non-NaN.
5565       NaNBehavior = SPNB_RETURNS_ANY;
5566     } else if (CmpInst::isOrdered(Pred)) {
5567       // An ordered comparison will return false when given a NaN, so it
5568       // returns the RHS.
5569       Ordered = true;
5570       if (LHSSafe)
5571         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5572         NaNBehavior = SPNB_RETURNS_NAN;
5573       else if (RHSSafe)
5574         NaNBehavior = SPNB_RETURNS_OTHER;
5575       else
5576         // Completely unsafe.
5577         return {SPF_UNKNOWN, SPNB_NA, false};
5578     } else {
5579       Ordered = false;
5580       // An unordered comparison will return true when given a NaN, so it
5581       // returns the LHS.
5582       if (LHSSafe)
5583         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5584         NaNBehavior = SPNB_RETURNS_OTHER;
5585       else if (RHSSafe)
5586         NaNBehavior = SPNB_RETURNS_NAN;
5587       else
5588         // Completely unsafe.
5589         return {SPF_UNKNOWN, SPNB_NA, false};
5590     }
5591   }
5592 
5593   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5594     std::swap(CmpLHS, CmpRHS);
5595     Pred = CmpInst::getSwappedPredicate(Pred);
5596     if (NaNBehavior == SPNB_RETURNS_NAN)
5597       NaNBehavior = SPNB_RETURNS_OTHER;
5598     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5599       NaNBehavior = SPNB_RETURNS_NAN;
5600     Ordered = !Ordered;
5601   }
5602 
5603   // ([if]cmp X, Y) ? X : Y
5604   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5605     switch (Pred) {
5606     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5607     case ICmpInst::ICMP_UGT:
5608     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5609     case ICmpInst::ICMP_SGT:
5610     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5611     case ICmpInst::ICMP_ULT:
5612     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5613     case ICmpInst::ICMP_SLT:
5614     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5615     case FCmpInst::FCMP_UGT:
5616     case FCmpInst::FCMP_UGE:
5617     case FCmpInst::FCMP_OGT:
5618     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5619     case FCmpInst::FCMP_ULT:
5620     case FCmpInst::FCMP_ULE:
5621     case FCmpInst::FCMP_OLT:
5622     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5623     }
5624   }
5625 
5626   if (isKnownNegation(TrueVal, FalseVal)) {
5627     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5628     // match against either LHS or sext(LHS).
5629     auto MaybeSExtCmpLHS =
5630         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5631     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5632     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5633     if (match(TrueVal, MaybeSExtCmpLHS)) {
5634       // Set the return values. If the compare uses the negated value (-X >s 0),
5635       // swap the return values because the negated value is always 'RHS'.
5636       LHS = TrueVal;
5637       RHS = FalseVal;
5638       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5639         std::swap(LHS, RHS);
5640 
5641       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5642       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5643       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5644         return {SPF_ABS, SPNB_NA, false};
5645 
5646       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5647       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5648         return {SPF_ABS, SPNB_NA, false};
5649 
5650       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5651       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5652       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5653         return {SPF_NABS, SPNB_NA, false};
5654     }
5655     else if (match(FalseVal, MaybeSExtCmpLHS)) {
5656       // Set the return values. If the compare uses the negated value (-X >s 0),
5657       // swap the return values because the negated value is always 'RHS'.
5658       LHS = FalseVal;
5659       RHS = TrueVal;
5660       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5661         std::swap(LHS, RHS);
5662 
5663       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5664       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5665       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5666         return {SPF_NABS, SPNB_NA, false};
5667 
5668       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5669       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5670       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5671         return {SPF_ABS, SPNB_NA, false};
5672     }
5673   }
5674 
5675   if (CmpInst::isIntPredicate(Pred))
5676     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5677 
5678   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5679   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5680   // semantics than minNum. Be conservative in such case.
5681   if (NaNBehavior != SPNB_RETURNS_ANY ||
5682       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5683        !isKnownNonZero(CmpRHS)))
5684     return {SPF_UNKNOWN, SPNB_NA, false};
5685 
5686   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5687 }
5688 
5689 /// Helps to match a select pattern in case of a type mismatch.
5690 ///
5691 /// The function processes the case when type of true and false values of a
5692 /// select instruction differs from type of the cmp instruction operands because
5693 /// of a cast instruction. The function checks if it is legal to move the cast
5694 /// operation after "select". If yes, it returns the new second value of
5695 /// "select" (with the assumption that cast is moved):
5696 /// 1. As operand of cast instruction when both values of "select" are same cast
5697 /// instructions.
5698 /// 2. As restored constant (by applying reverse cast operation) when the first
5699 /// value of the "select" is a cast operation and the second value is a
5700 /// constant.
5701 /// NOTE: We return only the new second value because the first value could be
5702 /// accessed as operand of cast instruction.
5703 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5704                               Instruction::CastOps *CastOp) {
5705   auto *Cast1 = dyn_cast<CastInst>(V1);
5706   if (!Cast1)
5707     return nullptr;
5708 
5709   *CastOp = Cast1->getOpcode();
5710   Type *SrcTy = Cast1->getSrcTy();
5711   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5712     // If V1 and V2 are both the same cast from the same type, look through V1.
5713     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5714       return Cast2->getOperand(0);
5715     return nullptr;
5716   }
5717 
5718   auto *C = dyn_cast<Constant>(V2);
5719   if (!C)
5720     return nullptr;
5721 
5722   Constant *CastedTo = nullptr;
5723   switch (*CastOp) {
5724   case Instruction::ZExt:
5725     if (CmpI->isUnsigned())
5726       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5727     break;
5728   case Instruction::SExt:
5729     if (CmpI->isSigned())
5730       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5731     break;
5732   case Instruction::Trunc:
5733     Constant *CmpConst;
5734     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5735         CmpConst->getType() == SrcTy) {
5736       // Here we have the following case:
5737       //
5738       //   %cond = cmp iN %x, CmpConst
5739       //   %tr = trunc iN %x to iK
5740       //   %narrowsel = select i1 %cond, iK %t, iK C
5741       //
5742       // We can always move trunc after select operation:
5743       //
5744       //   %cond = cmp iN %x, CmpConst
5745       //   %widesel = select i1 %cond, iN %x, iN CmpConst
5746       //   %tr = trunc iN %widesel to iK
5747       //
5748       // Note that C could be extended in any way because we don't care about
5749       // upper bits after truncation. It can't be abs pattern, because it would
5750       // look like:
5751       //
5752       //   select i1 %cond, x, -x.
5753       //
5754       // So only min/max pattern could be matched. Such match requires widened C
5755       // == CmpConst. That is why set widened C = CmpConst, condition trunc
5756       // CmpConst == C is checked below.
5757       CastedTo = CmpConst;
5758     } else {
5759       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5760     }
5761     break;
5762   case Instruction::FPTrunc:
5763     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5764     break;
5765   case Instruction::FPExt:
5766     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5767     break;
5768   case Instruction::FPToUI:
5769     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5770     break;
5771   case Instruction::FPToSI:
5772     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5773     break;
5774   case Instruction::UIToFP:
5775     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5776     break;
5777   case Instruction::SIToFP:
5778     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5779     break;
5780   default:
5781     break;
5782   }
5783 
5784   if (!CastedTo)
5785     return nullptr;
5786 
5787   // Make sure the cast doesn't lose any information.
5788   Constant *CastedBack =
5789       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5790   if (CastedBack != C)
5791     return nullptr;
5792 
5793   return CastedTo;
5794 }
5795 
5796 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5797                                              Instruction::CastOps *CastOp,
5798                                              unsigned Depth) {
5799   if (Depth >= MaxDepth)
5800     return {SPF_UNKNOWN, SPNB_NA, false};
5801 
5802   SelectInst *SI = dyn_cast<SelectInst>(V);
5803   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5804 
5805   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5806   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5807 
5808   Value *TrueVal = SI->getTrueValue();
5809   Value *FalseVal = SI->getFalseValue();
5810 
5811   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5812                                             CastOp, Depth);
5813 }
5814 
5815 SelectPatternResult llvm::matchDecomposedSelectPattern(
5816     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5817     Instruction::CastOps *CastOp, unsigned Depth) {
5818   CmpInst::Predicate Pred = CmpI->getPredicate();
5819   Value *CmpLHS = CmpI->getOperand(0);
5820   Value *CmpRHS = CmpI->getOperand(1);
5821   FastMathFlags FMF;
5822   if (isa<FPMathOperator>(CmpI))
5823     FMF = CmpI->getFastMathFlags();
5824 
5825   // Bail out early.
5826   if (CmpI->isEquality())
5827     return {SPF_UNKNOWN, SPNB_NA, false};
5828 
5829   // Deal with type mismatches.
5830   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5831     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5832       // If this is a potential fmin/fmax with a cast to integer, then ignore
5833       // -0.0 because there is no corresponding integer value.
5834       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5835         FMF.setNoSignedZeros();
5836       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5837                                   cast<CastInst>(TrueVal)->getOperand(0), C,
5838                                   LHS, RHS, Depth);
5839     }
5840     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5841       // If this is a potential fmin/fmax with a cast to integer, then ignore
5842       // -0.0 because there is no corresponding integer value.
5843       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5844         FMF.setNoSignedZeros();
5845       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5846                                   C, cast<CastInst>(FalseVal)->getOperand(0),
5847                                   LHS, RHS, Depth);
5848     }
5849   }
5850   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5851                               LHS, RHS, Depth);
5852 }
5853 
5854 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5855   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5856   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5857   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5858   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5859   if (SPF == SPF_FMINNUM)
5860     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5861   if (SPF == SPF_FMAXNUM)
5862     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5863   llvm_unreachable("unhandled!");
5864 }
5865 
5866 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5867   if (SPF == SPF_SMIN) return SPF_SMAX;
5868   if (SPF == SPF_UMIN) return SPF_UMAX;
5869   if (SPF == SPF_SMAX) return SPF_SMIN;
5870   if (SPF == SPF_UMAX) return SPF_UMIN;
5871   llvm_unreachable("unhandled!");
5872 }
5873 
5874 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5875   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5876 }
5877 
5878 /// Return true if "icmp Pred LHS RHS" is always true.
5879 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5880                             const Value *RHS, const DataLayout &DL,
5881                             unsigned Depth) {
5882   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5883   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5884     return true;
5885 
5886   switch (Pred) {
5887   default:
5888     return false;
5889 
5890   case CmpInst::ICMP_SLE: {
5891     const APInt *C;
5892 
5893     // LHS s<= LHS +_{nsw} C   if C >= 0
5894     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5895       return !C->isNegative();
5896     return false;
5897   }
5898 
5899   case CmpInst::ICMP_ULE: {
5900     const APInt *C;
5901 
5902     // LHS u<= LHS +_{nuw} C   for any C
5903     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5904       return true;
5905 
5906     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5907     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5908                                        const Value *&X,
5909                                        const APInt *&CA, const APInt *&CB) {
5910       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
5911           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
5912         return true;
5913 
5914       // If X & C == 0 then (X | C) == X +_{nuw} C
5915       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
5916           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
5917         KnownBits Known(CA->getBitWidth());
5918         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
5919                          /*CxtI*/ nullptr, /*DT*/ nullptr);
5920         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
5921           return true;
5922       }
5923 
5924       return false;
5925     };
5926 
5927     const Value *X;
5928     const APInt *CLHS, *CRHS;
5929     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
5930       return CLHS->ule(*CRHS);
5931 
5932     return false;
5933   }
5934   }
5935 }
5936 
5937 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
5938 /// ALHS ARHS" is true.  Otherwise, return None.
5939 static Optional<bool>
5940 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
5941                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
5942                       const DataLayout &DL, unsigned Depth) {
5943   switch (Pred) {
5944   default:
5945     return None;
5946 
5947   case CmpInst::ICMP_SLT:
5948   case CmpInst::ICMP_SLE:
5949     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
5950         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
5951       return true;
5952     return None;
5953 
5954   case CmpInst::ICMP_ULT:
5955   case CmpInst::ICMP_ULE:
5956     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
5957         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
5958       return true;
5959     return None;
5960   }
5961 }
5962 
5963 /// Return true if the operands of the two compares match.  IsSwappedOps is true
5964 /// when the operands match, but are swapped.
5965 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
5966                           const Value *BLHS, const Value *BRHS,
5967                           bool &IsSwappedOps) {
5968 
5969   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
5970   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
5971   return IsMatchingOps || IsSwappedOps;
5972 }
5973 
5974 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
5975 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
5976 /// Otherwise, return None if we can't infer anything.
5977 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
5978                                                     CmpInst::Predicate BPred,
5979                                                     bool AreSwappedOps) {
5980   // Canonicalize the predicate as if the operands were not commuted.
5981   if (AreSwappedOps)
5982     BPred = ICmpInst::getSwappedPredicate(BPred);
5983 
5984   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
5985     return true;
5986   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
5987     return false;
5988 
5989   return None;
5990 }
5991 
5992 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
5993 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
5994 /// Otherwise, return None if we can't infer anything.
5995 static Optional<bool>
5996 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
5997                                  const ConstantInt *C1,
5998                                  CmpInst::Predicate BPred,
5999                                  const ConstantInt *C2) {
6000   ConstantRange DomCR =
6001       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6002   ConstantRange CR =
6003       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6004   ConstantRange Intersection = DomCR.intersectWith(CR);
6005   ConstantRange Difference = DomCR.difference(CR);
6006   if (Intersection.isEmptySet())
6007     return false;
6008   if (Difference.isEmptySet())
6009     return true;
6010   return None;
6011 }
6012 
6013 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6014 /// false.  Otherwise, return None if we can't infer anything.
6015 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6016                                          CmpInst::Predicate BPred,
6017                                          const Value *BLHS, const Value *BRHS,
6018                                          const DataLayout &DL, bool LHSIsTrue,
6019                                          unsigned Depth) {
6020   Value *ALHS = LHS->getOperand(0);
6021   Value *ARHS = LHS->getOperand(1);
6022 
6023   // The rest of the logic assumes the LHS condition is true.  If that's not the
6024   // case, invert the predicate to make it so.
6025   CmpInst::Predicate APred =
6026       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6027 
6028   // Can we infer anything when the two compares have matching operands?
6029   bool AreSwappedOps;
6030   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6031     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6032             APred, BPred, AreSwappedOps))
6033       return Implication;
6034     // No amount of additional analysis will infer the second condition, so
6035     // early exit.
6036     return None;
6037   }
6038 
6039   // Can we infer anything when the LHS operands match and the RHS operands are
6040   // constants (not necessarily matching)?
6041   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6042     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6043             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6044       return Implication;
6045     // No amount of additional analysis will infer the second condition, so
6046     // early exit.
6047     return None;
6048   }
6049 
6050   if (APred == BPred)
6051     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6052   return None;
6053 }
6054 
6055 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6056 /// false.  Otherwise, return None if we can't infer anything.  We expect the
6057 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
6058 static Optional<bool>
6059 isImpliedCondAndOr(const BinaryOperator *LHS, CmpInst::Predicate RHSPred,
6060                    const Value *RHSOp0, const Value *RHSOp1,
6061 
6062                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6063   // The LHS must be an 'or' or an 'and' instruction.
6064   assert((LHS->getOpcode() == Instruction::And ||
6065           LHS->getOpcode() == Instruction::Or) &&
6066          "Expected LHS to be 'and' or 'or'.");
6067 
6068   assert(Depth <= MaxDepth && "Hit recursion limit");
6069 
6070   // If the result of an 'or' is false, then we know both legs of the 'or' are
6071   // false.  Similarly, if the result of an 'and' is true, then we know both
6072   // legs of the 'and' are true.
6073   Value *ALHS, *ARHS;
6074   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
6075       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
6076     // FIXME: Make this non-recursion.
6077     if (Optional<bool> Implication = isImpliedCondition(
6078             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6079       return Implication;
6080     if (Optional<bool> Implication = isImpliedCondition(
6081             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6082       return Implication;
6083     return None;
6084   }
6085   return None;
6086 }
6087 
6088 Optional<bool>
6089 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6090                          const Value *RHSOp0, const Value *RHSOp1,
6091                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6092   // Bail out when we hit the limit.
6093   if (Depth == MaxDepth)
6094     return None;
6095 
6096   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6097   // example.
6098   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6099     return None;
6100 
6101   Type *OpTy = LHS->getType();
6102   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6103 
6104   // FIXME: Extending the code below to handle vectors.
6105   if (OpTy->isVectorTy())
6106     return None;
6107 
6108   assert(OpTy->isIntegerTy(1) && "implied by above");
6109 
6110   // Both LHS and RHS are icmps.
6111   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6112   if (LHSCmp)
6113     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6114                               Depth);
6115 
6116   /// The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to
6117   /// be / an icmp. FIXME: Add support for and/or on the RHS.
6118   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
6119   if (LHSBO) {
6120     if ((LHSBO->getOpcode() == Instruction::And ||
6121          LHSBO->getOpcode() == Instruction::Or))
6122       return isImpliedCondAndOr(LHSBO, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6123                                 Depth);
6124   }
6125   return None;
6126 }
6127 
6128 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6129                                         const DataLayout &DL, bool LHSIsTrue,
6130                                         unsigned Depth) {
6131   // LHS ==> RHS by definition
6132   if (LHS == RHS)
6133     return LHSIsTrue;
6134 
6135   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6136   if (RHSCmp)
6137     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6138                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6139                               LHSIsTrue, Depth);
6140   return None;
6141 }
6142 
6143 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6144 // condition dominating ContextI or nullptr, if no condition is found.
6145 static std::pair<Value *, bool>
6146 getDomPredecessorCondition(const Instruction *ContextI) {
6147   if (!ContextI || !ContextI->getParent())
6148     return {nullptr, false};
6149 
6150   // TODO: This is a poor/cheap way to determine dominance. Should we use a
6151   // dominator tree (eg, from a SimplifyQuery) instead?
6152   const BasicBlock *ContextBB = ContextI->getParent();
6153   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6154   if (!PredBB)
6155     return {nullptr, false};
6156 
6157   // We need a conditional branch in the predecessor.
6158   Value *PredCond;
6159   BasicBlock *TrueBB, *FalseBB;
6160   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6161     return {nullptr, false};
6162 
6163   // The branch should get simplified. Don't bother simplifying this condition.
6164   if (TrueBB == FalseBB)
6165     return {nullptr, false};
6166 
6167   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6168          "Predecessor block does not point to successor?");
6169 
6170   // Is this condition implied by the predecessor condition?
6171   return {PredCond, TrueBB == ContextBB};
6172 }
6173 
6174 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6175                                              const Instruction *ContextI,
6176                                              const DataLayout &DL) {
6177   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6178   auto PredCond = getDomPredecessorCondition(ContextI);
6179   if (PredCond.first)
6180     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6181   return None;
6182 }
6183 
6184 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6185                                              const Value *LHS, const Value *RHS,
6186                                              const Instruction *ContextI,
6187                                              const DataLayout &DL) {
6188   auto PredCond = getDomPredecessorCondition(ContextI);
6189   if (PredCond.first)
6190     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6191                               PredCond.second);
6192   return None;
6193 }
6194 
6195 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6196                               APInt &Upper, const InstrInfoQuery &IIQ) {
6197   unsigned Width = Lower.getBitWidth();
6198   const APInt *C;
6199   switch (BO.getOpcode()) {
6200   case Instruction::Add:
6201     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6202       // FIXME: If we have both nuw and nsw, we should reduce the range further.
6203       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6204         // 'add nuw x, C' produces [C, UINT_MAX].
6205         Lower = *C;
6206       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6207         if (C->isNegative()) {
6208           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6209           Lower = APInt::getSignedMinValue(Width);
6210           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6211         } else {
6212           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6213           Lower = APInt::getSignedMinValue(Width) + *C;
6214           Upper = APInt::getSignedMaxValue(Width) + 1;
6215         }
6216       }
6217     }
6218     break;
6219 
6220   case Instruction::And:
6221     if (match(BO.getOperand(1), m_APInt(C)))
6222       // 'and x, C' produces [0, C].
6223       Upper = *C + 1;
6224     break;
6225 
6226   case Instruction::Or:
6227     if (match(BO.getOperand(1), m_APInt(C)))
6228       // 'or x, C' produces [C, UINT_MAX].
6229       Lower = *C;
6230     break;
6231 
6232   case Instruction::AShr:
6233     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6234       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6235       Lower = APInt::getSignedMinValue(Width).ashr(*C);
6236       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6237     } else if (match(BO.getOperand(0), m_APInt(C))) {
6238       unsigned ShiftAmount = Width - 1;
6239       if (!C->isNullValue() && IIQ.isExact(&BO))
6240         ShiftAmount = C->countTrailingZeros();
6241       if (C->isNegative()) {
6242         // 'ashr C, x' produces [C, C >> (Width-1)]
6243         Lower = *C;
6244         Upper = C->ashr(ShiftAmount) + 1;
6245       } else {
6246         // 'ashr C, x' produces [C >> (Width-1), C]
6247         Lower = C->ashr(ShiftAmount);
6248         Upper = *C + 1;
6249       }
6250     }
6251     break;
6252 
6253   case Instruction::LShr:
6254     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6255       // 'lshr x, C' produces [0, UINT_MAX >> C].
6256       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6257     } else if (match(BO.getOperand(0), m_APInt(C))) {
6258       // 'lshr C, x' produces [C >> (Width-1), C].
6259       unsigned ShiftAmount = Width - 1;
6260       if (!C->isNullValue() && IIQ.isExact(&BO))
6261         ShiftAmount = C->countTrailingZeros();
6262       Lower = C->lshr(ShiftAmount);
6263       Upper = *C + 1;
6264     }
6265     break;
6266 
6267   case Instruction::Shl:
6268     if (match(BO.getOperand(0), m_APInt(C))) {
6269       if (IIQ.hasNoUnsignedWrap(&BO)) {
6270         // 'shl nuw C, x' produces [C, C << CLZ(C)]
6271         Lower = *C;
6272         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6273       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6274         if (C->isNegative()) {
6275           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6276           unsigned ShiftAmount = C->countLeadingOnes() - 1;
6277           Lower = C->shl(ShiftAmount);
6278           Upper = *C + 1;
6279         } else {
6280           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6281           unsigned ShiftAmount = C->countLeadingZeros() - 1;
6282           Lower = *C;
6283           Upper = C->shl(ShiftAmount) + 1;
6284         }
6285       }
6286     }
6287     break;
6288 
6289   case Instruction::SDiv:
6290     if (match(BO.getOperand(1), m_APInt(C))) {
6291       APInt IntMin = APInt::getSignedMinValue(Width);
6292       APInt IntMax = APInt::getSignedMaxValue(Width);
6293       if (C->isAllOnesValue()) {
6294         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6295         //    where C != -1 and C != 0 and C != 1
6296         Lower = IntMin + 1;
6297         Upper = IntMax + 1;
6298       } else if (C->countLeadingZeros() < Width - 1) {
6299         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6300         //    where C != -1 and C != 0 and C != 1
6301         Lower = IntMin.sdiv(*C);
6302         Upper = IntMax.sdiv(*C);
6303         if (Lower.sgt(Upper))
6304           std::swap(Lower, Upper);
6305         Upper = Upper + 1;
6306         assert(Upper != Lower && "Upper part of range has wrapped!");
6307       }
6308     } else if (match(BO.getOperand(0), m_APInt(C))) {
6309       if (C->isMinSignedValue()) {
6310         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6311         Lower = *C;
6312         Upper = Lower.lshr(1) + 1;
6313       } else {
6314         // 'sdiv C, x' produces [-|C|, |C|].
6315         Upper = C->abs() + 1;
6316         Lower = (-Upper) + 1;
6317       }
6318     }
6319     break;
6320 
6321   case Instruction::UDiv:
6322     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6323       // 'udiv x, C' produces [0, UINT_MAX / C].
6324       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6325     } else if (match(BO.getOperand(0), m_APInt(C))) {
6326       // 'udiv C, x' produces [0, C].
6327       Upper = *C + 1;
6328     }
6329     break;
6330 
6331   case Instruction::SRem:
6332     if (match(BO.getOperand(1), m_APInt(C))) {
6333       // 'srem x, C' produces (-|C|, |C|).
6334       Upper = C->abs();
6335       Lower = (-Upper) + 1;
6336     }
6337     break;
6338 
6339   case Instruction::URem:
6340     if (match(BO.getOperand(1), m_APInt(C)))
6341       // 'urem x, C' produces [0, C).
6342       Upper = *C;
6343     break;
6344 
6345   default:
6346     break;
6347   }
6348 }
6349 
6350 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6351                                   APInt &Upper) {
6352   unsigned Width = Lower.getBitWidth();
6353   const APInt *C;
6354   switch (II.getIntrinsicID()) {
6355   case Intrinsic::uadd_sat:
6356     // uadd.sat(x, C) produces [C, UINT_MAX].
6357     if (match(II.getOperand(0), m_APInt(C)) ||
6358         match(II.getOperand(1), m_APInt(C)))
6359       Lower = *C;
6360     break;
6361   case Intrinsic::sadd_sat:
6362     if (match(II.getOperand(0), m_APInt(C)) ||
6363         match(II.getOperand(1), m_APInt(C))) {
6364       if (C->isNegative()) {
6365         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6366         Lower = APInt::getSignedMinValue(Width);
6367         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6368       } else {
6369         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6370         Lower = APInt::getSignedMinValue(Width) + *C;
6371         Upper = APInt::getSignedMaxValue(Width) + 1;
6372       }
6373     }
6374     break;
6375   case Intrinsic::usub_sat:
6376     // usub.sat(C, x) produces [0, C].
6377     if (match(II.getOperand(0), m_APInt(C)))
6378       Upper = *C + 1;
6379     // usub.sat(x, C) produces [0, UINT_MAX - C].
6380     else if (match(II.getOperand(1), m_APInt(C)))
6381       Upper = APInt::getMaxValue(Width) - *C + 1;
6382     break;
6383   case Intrinsic::ssub_sat:
6384     if (match(II.getOperand(0), m_APInt(C))) {
6385       if (C->isNegative()) {
6386         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6387         Lower = APInt::getSignedMinValue(Width);
6388         Upper = *C - APInt::getSignedMinValue(Width) + 1;
6389       } else {
6390         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6391         Lower = *C - APInt::getSignedMaxValue(Width);
6392         Upper = APInt::getSignedMaxValue(Width) + 1;
6393       }
6394     } else if (match(II.getOperand(1), m_APInt(C))) {
6395       if (C->isNegative()) {
6396         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6397         Lower = APInt::getSignedMinValue(Width) - *C;
6398         Upper = APInt::getSignedMaxValue(Width) + 1;
6399       } else {
6400         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6401         Lower = APInt::getSignedMinValue(Width);
6402         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6403       }
6404     }
6405     break;
6406   default:
6407     break;
6408   }
6409 }
6410 
6411 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6412                                       APInt &Upper, const InstrInfoQuery &IIQ) {
6413   const Value *LHS = nullptr, *RHS = nullptr;
6414   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6415   if (R.Flavor == SPF_UNKNOWN)
6416     return;
6417 
6418   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6419 
6420   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6421     // If the negation part of the abs (in RHS) has the NSW flag,
6422     // then the result of abs(X) is [0..SIGNED_MAX],
6423     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6424     Lower = APInt::getNullValue(BitWidth);
6425     if (match(RHS, m_Neg(m_Specific(LHS))) &&
6426         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6427       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6428     else
6429       Upper = APInt::getSignedMinValue(BitWidth) + 1;
6430     return;
6431   }
6432 
6433   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6434     // The result of -abs(X) is <= 0.
6435     Lower = APInt::getSignedMinValue(BitWidth);
6436     Upper = APInt(BitWidth, 1);
6437     return;
6438   }
6439 
6440   const APInt *C;
6441   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6442     return;
6443 
6444   switch (R.Flavor) {
6445     case SPF_UMIN:
6446       Upper = *C + 1;
6447       break;
6448     case SPF_UMAX:
6449       Lower = *C;
6450       break;
6451     case SPF_SMIN:
6452       Lower = APInt::getSignedMinValue(BitWidth);
6453       Upper = *C + 1;
6454       break;
6455     case SPF_SMAX:
6456       Lower = *C;
6457       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6458       break;
6459     default:
6460       break;
6461   }
6462 }
6463 
6464 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6465                                          AssumptionCache *AC,
6466                                          const Instruction *CtxI,
6467                                          unsigned Depth) {
6468   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6469 
6470   if (Depth == MaxDepth)
6471     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6472 
6473   const APInt *C;
6474   if (match(V, m_APInt(C)))
6475     return ConstantRange(*C);
6476 
6477   InstrInfoQuery IIQ(UseInstrInfo);
6478   unsigned BitWidth = V->getType()->getScalarSizeInBits();
6479   APInt Lower = APInt(BitWidth, 0);
6480   APInt Upper = APInt(BitWidth, 0);
6481   if (auto *BO = dyn_cast<BinaryOperator>(V))
6482     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6483   else if (auto *II = dyn_cast<IntrinsicInst>(V))
6484     setLimitsForIntrinsic(*II, Lower, Upper);
6485   else if (auto *SI = dyn_cast<SelectInst>(V))
6486     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6487 
6488   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6489 
6490   if (auto *I = dyn_cast<Instruction>(V))
6491     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6492       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6493 
6494   if (CtxI && AC) {
6495     // Try to restrict the range based on information from assumptions.
6496     for (auto &AssumeVH : AC->assumptionsFor(V)) {
6497       if (!AssumeVH)
6498         continue;
6499       CallInst *I = cast<CallInst>(AssumeVH);
6500       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6501              "Got assumption for the wrong function!");
6502       assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6503              "must be an assume intrinsic");
6504 
6505       if (!isValidAssumeForContext(I, CtxI, nullptr))
6506         continue;
6507       Value *Arg = I->getArgOperand(0);
6508       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6509       // Currently we just use information from comparisons.
6510       if (!Cmp || Cmp->getOperand(0) != V)
6511         continue;
6512       ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6513                                                AC, I, Depth + 1);
6514       CR = CR.intersectWith(
6515           ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6516     }
6517   }
6518 
6519   return CR;
6520 }
6521 
6522 static Optional<int64_t>
6523 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6524   // Skip over the first indices.
6525   gep_type_iterator GTI = gep_type_begin(GEP);
6526   for (unsigned i = 1; i != Idx; ++i, ++GTI)
6527     /*skip along*/;
6528 
6529   // Compute the offset implied by the rest of the indices.
6530   int64_t Offset = 0;
6531   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
6532     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
6533     if (!OpC)
6534       return None;
6535     if (OpC->isZero())
6536       continue; // No offset.
6537 
6538     // Handle struct indices, which add their field offset to the pointer.
6539     if (StructType *STy = GTI.getStructTypeOrNull()) {
6540       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
6541       continue;
6542     }
6543 
6544     // Otherwise, we have a sequential type like an array or fixed-length
6545     // vector. Multiply the index by the ElementSize.
6546     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
6547     if (Size.isScalable())
6548       return None;
6549     Offset += Size.getFixedSize() * OpC->getSExtValue();
6550   }
6551 
6552   return Offset;
6553 }
6554 
6555 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
6556                                         const DataLayout &DL) {
6557   Ptr1 = Ptr1->stripPointerCasts();
6558   Ptr2 = Ptr2->stripPointerCasts();
6559 
6560   // Handle the trivial case first.
6561   if (Ptr1 == Ptr2) {
6562     return 0;
6563   }
6564 
6565   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
6566   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
6567 
6568   // If one pointer is a GEP see if the GEP is a constant offset from the base,
6569   // as in "P" and "gep P, 1".
6570   // Also do this iteratively to handle the the following case:
6571   //   Ptr_t1 = GEP Ptr1, c1
6572   //   Ptr_t2 = GEP Ptr_t1, c2
6573   //   Ptr2 = GEP Ptr_t2, c3
6574   // where we will return c1+c2+c3.
6575   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
6576   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
6577   // are the same, and return the difference between offsets.
6578   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
6579                                  const Value *Ptr) -> Optional<int64_t> {
6580     const GEPOperator *GEP_T = GEP;
6581     int64_t OffsetVal = 0;
6582     bool HasSameBase = false;
6583     while (GEP_T) {
6584       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
6585       if (!Offset)
6586         return None;
6587       OffsetVal += *Offset;
6588       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
6589       if (Op0 == Ptr) {
6590         HasSameBase = true;
6591         break;
6592       }
6593       GEP_T = dyn_cast<GEPOperator>(Op0);
6594     }
6595     if (!HasSameBase)
6596       return None;
6597     return OffsetVal;
6598   };
6599 
6600   if (GEP1) {
6601     auto Offset = getOffsetFromBase(GEP1, Ptr2);
6602     if (Offset)
6603       return -*Offset;
6604   }
6605   if (GEP2) {
6606     auto Offset = getOffsetFromBase(GEP2, Ptr1);
6607     if (Offset)
6608       return Offset;
6609   }
6610 
6611   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
6612   // base.  After that base, they may have some number of common (and
6613   // potentially variable) indices.  After that they handle some constant
6614   // offset, which determines their offset from each other.  At this point, we
6615   // handle no other case.
6616   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
6617     return None;
6618 
6619   // Skip any common indices and track the GEP types.
6620   unsigned Idx = 1;
6621   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
6622     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
6623       break;
6624 
6625   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
6626   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
6627   if (!Offset1 || !Offset2)
6628     return None;
6629   return *Offset2 - *Offset1;
6630 }
6631