1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains routines that help analyze properties that chains of
10 // computations have.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/GuardUtils.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/Loads.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/ConstantRange.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/DiagnosticInfo.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GetElementPtrTypeIterator.h"
46 #include "llvm/IR/GlobalAlias.h"
47 #include "llvm/IR/GlobalValue.h"
48 #include "llvm/IR/GlobalVariable.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/Intrinsics.h"
54 #include "llvm/IR/IntrinsicsAArch64.h"
55 #include "llvm/IR/IntrinsicsX86.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/Operator.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/User.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Compiler.h"
67 #include "llvm/Support/ErrorHandling.h"
68 #include "llvm/Support/KnownBits.h"
69 #include "llvm/Support/MathExtras.h"
70 #include <algorithm>
71 #include <array>
72 #include <cassert>
73 #include <cstdint>
74 #include <iterator>
75 #include <utility>
76 
77 using namespace llvm;
78 using namespace llvm::PatternMatch;
79 
80 // Controls the number of uses of the value searched for possible
81 // dominating comparisons.
82 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
83                                               cl::Hidden, cl::init(20));
84 
85 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
86 /// returns the element type's bitwidth.
87 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
88   if (unsigned BitWidth = Ty->getScalarSizeInBits())
89     return BitWidth;
90 
91   return DL.getPointerTypeSizeInBits(Ty);
92 }
93 
94 namespace {
95 
96 // Simplifying using an assume can only be done in a particular control-flow
97 // context (the context instruction provides that context). If an assume and
98 // the context instruction are not in the same block then the DT helps in
99 // figuring out if we can use it.
100 struct Query {
101   const DataLayout &DL;
102   AssumptionCache *AC;
103   const Instruction *CxtI;
104   const DominatorTree *DT;
105 
106   // Unlike the other analyses, this may be a nullptr because not all clients
107   // provide it currently.
108   OptimizationRemarkEmitter *ORE;
109 
110   /// Set of assumptions that should be excluded from further queries.
111   /// This is because of the potential for mutual recursion to cause
112   /// computeKnownBits to repeatedly visit the same assume intrinsic. The
113   /// classic case of this is assume(x = y), which will attempt to determine
114   /// bits in x from bits in y, which will attempt to determine bits in y from
115   /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
116   /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
117   /// (all of which can call computeKnownBits), and so on.
118   std::array<const Value *, MaxAnalysisRecursionDepth> Excluded;
119 
120   /// If true, it is safe to use metadata during simplification.
121   InstrInfoQuery IIQ;
122 
123   unsigned NumExcluded = 0;
124 
125   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
126         const DominatorTree *DT, bool UseInstrInfo,
127         OptimizationRemarkEmitter *ORE = nullptr)
128       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
129 
130   Query(const Query &Q, const Value *NewExcl)
131       : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
132         NumExcluded(Q.NumExcluded) {
133     Excluded = Q.Excluded;
134     Excluded[NumExcluded++] = NewExcl;
135     assert(NumExcluded <= Excluded.size());
136   }
137 
138   bool isExcluded(const Value *Value) const {
139     if (NumExcluded == 0)
140       return false;
141     auto End = Excluded.begin() + NumExcluded;
142     return std::find(Excluded.begin(), End, Value) != End;
143   }
144 };
145 
146 } // end anonymous namespace
147 
148 // Given the provided Value and, potentially, a context instruction, return
149 // the preferred context instruction (if any).
150 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
151   // If we've been provided with a context instruction, then use that (provided
152   // it has been inserted).
153   if (CxtI && CxtI->getParent())
154     return CxtI;
155 
156   // If the value is really an already-inserted instruction, then use that.
157   CxtI = dyn_cast<Instruction>(V);
158   if (CxtI && CxtI->getParent())
159     return CxtI;
160 
161   return nullptr;
162 }
163 
164 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
165                                    const APInt &DemandedElts,
166                                    APInt &DemandedLHS, APInt &DemandedRHS) {
167   // The length of scalable vectors is unknown at compile time, thus we
168   // cannot check their values
169   if (isa<ScalableVectorType>(Shuf->getType()))
170     return false;
171 
172   int NumElts =
173       cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
174   int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
175   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
176   if (DemandedElts.isNullValue())
177     return true;
178   // Simple case of a shuffle with zeroinitializer.
179   if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
180     DemandedLHS.setBit(0);
181     return true;
182   }
183   for (int i = 0; i != NumMaskElts; ++i) {
184     if (!DemandedElts[i])
185       continue;
186     int M = Shuf->getMaskValue(i);
187     assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
188 
189     // For undef elements, we don't know anything about the common state of
190     // the shuffle result.
191     if (M == -1)
192       return false;
193     if (M < NumElts)
194       DemandedLHS.setBit(M % NumElts);
195     else
196       DemandedRHS.setBit(M % NumElts);
197   }
198 
199   return true;
200 }
201 
202 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
203                              KnownBits &Known, unsigned Depth, const Query &Q);
204 
205 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
206                              const Query &Q) {
207   // FIXME: We currently have no way to represent the DemandedElts of a scalable
208   // vector
209   if (isa<ScalableVectorType>(V->getType())) {
210     Known.resetAll();
211     return;
212   }
213 
214   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
215   APInt DemandedElts =
216       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
217   computeKnownBits(V, DemandedElts, Known, Depth, Q);
218 }
219 
220 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
221                             const DataLayout &DL, unsigned Depth,
222                             AssumptionCache *AC, const Instruction *CxtI,
223                             const DominatorTree *DT,
224                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
225   ::computeKnownBits(V, Known, Depth,
226                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
227 }
228 
229 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
230                             KnownBits &Known, const DataLayout &DL,
231                             unsigned Depth, AssumptionCache *AC,
232                             const Instruction *CxtI, const DominatorTree *DT,
233                             OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
234   ::computeKnownBits(V, DemandedElts, Known, Depth,
235                      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
236 }
237 
238 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
239                                   unsigned Depth, const Query &Q);
240 
241 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
242                                   const Query &Q);
243 
244 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
245                                  unsigned Depth, AssumptionCache *AC,
246                                  const Instruction *CxtI,
247                                  const DominatorTree *DT,
248                                  OptimizationRemarkEmitter *ORE,
249                                  bool UseInstrInfo) {
250   return ::computeKnownBits(
251       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
252 }
253 
254 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
255                                  const DataLayout &DL, unsigned Depth,
256                                  AssumptionCache *AC, const Instruction *CxtI,
257                                  const DominatorTree *DT,
258                                  OptimizationRemarkEmitter *ORE,
259                                  bool UseInstrInfo) {
260   return ::computeKnownBits(
261       V, DemandedElts, Depth,
262       Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
263 }
264 
265 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
266                                const DataLayout &DL, AssumptionCache *AC,
267                                const Instruction *CxtI, const DominatorTree *DT,
268                                bool UseInstrInfo) {
269   assert(LHS->getType() == RHS->getType() &&
270          "LHS and RHS should have the same type");
271   assert(LHS->getType()->isIntOrIntVectorTy() &&
272          "LHS and RHS should be integers");
273   // Look for an inverted mask: (X & ~M) op (Y & M).
274   Value *M;
275   if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
276       match(RHS, m_c_And(m_Specific(M), m_Value())))
277     return true;
278   if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
279       match(LHS, m_c_And(m_Specific(M), m_Value())))
280     return true;
281   IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
282   KnownBits LHSKnown(IT->getBitWidth());
283   KnownBits RHSKnown(IT->getBitWidth());
284   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
285   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
286   return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
287 }
288 
289 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
290   for (const User *U : CxtI->users()) {
291     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
292       if (IC->isEquality())
293         if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
294           if (C->isNullValue())
295             continue;
296     return false;
297   }
298   return true;
299 }
300 
301 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
302                                    const Query &Q);
303 
304 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
305                                   bool OrZero, unsigned Depth,
306                                   AssumptionCache *AC, const Instruction *CxtI,
307                                   const DominatorTree *DT, bool UseInstrInfo) {
308   return ::isKnownToBeAPowerOfTwo(
309       V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
310 }
311 
312 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
313                            unsigned Depth, const Query &Q);
314 
315 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
316 
317 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
318                           AssumptionCache *AC, const Instruction *CxtI,
319                           const DominatorTree *DT, bool UseInstrInfo) {
320   return ::isKnownNonZero(V, Depth,
321                           Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
322 }
323 
324 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
325                               unsigned Depth, AssumptionCache *AC,
326                               const Instruction *CxtI, const DominatorTree *DT,
327                               bool UseInstrInfo) {
328   KnownBits Known =
329       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
330   return Known.isNonNegative();
331 }
332 
333 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
334                            AssumptionCache *AC, const Instruction *CxtI,
335                            const DominatorTree *DT, bool UseInstrInfo) {
336   if (auto *CI = dyn_cast<ConstantInt>(V))
337     return CI->getValue().isStrictlyPositive();
338 
339   // TODO: We'd doing two recursive queries here.  We should factor this such
340   // that only a single query is needed.
341   return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
342          isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
343 }
344 
345 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
346                            AssumptionCache *AC, const Instruction *CxtI,
347                            const DominatorTree *DT, bool UseInstrInfo) {
348   KnownBits Known =
349       computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
350   return Known.isNegative();
351 }
352 
353 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
354                             const Query &Q);
355 
356 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
357                            const DataLayout &DL, AssumptionCache *AC,
358                            const Instruction *CxtI, const DominatorTree *DT,
359                            bool UseInstrInfo) {
360   return ::isKnownNonEqual(V1, V2, 0,
361                            Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
362                                  UseInstrInfo, /*ORE=*/nullptr));
363 }
364 
365 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
366                               const Query &Q);
367 
368 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
369                              const DataLayout &DL, unsigned Depth,
370                              AssumptionCache *AC, const Instruction *CxtI,
371                              const DominatorTree *DT, bool UseInstrInfo) {
372   return ::MaskedValueIsZero(
373       V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
374 }
375 
376 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
377                                    unsigned Depth, const Query &Q);
378 
379 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
380                                    const Query &Q) {
381   // FIXME: We currently have no way to represent the DemandedElts of a scalable
382   // vector
383   if (isa<ScalableVectorType>(V->getType()))
384     return 1;
385 
386   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
387   APInt DemandedElts =
388       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
389   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
390 }
391 
392 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
393                                   unsigned Depth, AssumptionCache *AC,
394                                   const Instruction *CxtI,
395                                   const DominatorTree *DT, bool UseInstrInfo) {
396   return ::ComputeNumSignBits(
397       V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
398 }
399 
400 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
401                                    bool NSW, const APInt &DemandedElts,
402                                    KnownBits &KnownOut, KnownBits &Known2,
403                                    unsigned Depth, const Query &Q) {
404   computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
405 
406   // If one operand is unknown and we have no nowrap information,
407   // the result will be unknown independently of the second operand.
408   if (KnownOut.isUnknown() && !NSW)
409     return;
410 
411   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
412   KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
413 }
414 
415 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
416                                 const APInt &DemandedElts, KnownBits &Known,
417                                 KnownBits &Known2, unsigned Depth,
418                                 const Query &Q) {
419   computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
420   computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
421 
422   bool isKnownNegative = false;
423   bool isKnownNonNegative = false;
424   // If the multiplication is known not to overflow, compute the sign bit.
425   if (NSW) {
426     if (Op0 == Op1) {
427       // The product of a number with itself is non-negative.
428       isKnownNonNegative = true;
429     } else {
430       bool isKnownNonNegativeOp1 = Known.isNonNegative();
431       bool isKnownNonNegativeOp0 = Known2.isNonNegative();
432       bool isKnownNegativeOp1 = Known.isNegative();
433       bool isKnownNegativeOp0 = Known2.isNegative();
434       // The product of two numbers with the same sign is non-negative.
435       isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
436                            (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
437       // The product of a negative number and a non-negative number is either
438       // negative or zero.
439       if (!isKnownNonNegative)
440         isKnownNegative =
441             (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
442              Known2.isNonZero()) ||
443             (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
444     }
445   }
446 
447   Known = KnownBits::computeForMul(Known, Known2);
448 
449   // Only make use of no-wrap flags if we failed to compute the sign bit
450   // directly.  This matters if the multiplication always overflows, in
451   // which case we prefer to follow the result of the direct computation,
452   // though as the program is invoking undefined behaviour we can choose
453   // whatever we like here.
454   if (isKnownNonNegative && !Known.isNegative())
455     Known.makeNonNegative();
456   else if (isKnownNegative && !Known.isNonNegative())
457     Known.makeNegative();
458 }
459 
460 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
461                                              KnownBits &Known) {
462   unsigned BitWidth = Known.getBitWidth();
463   unsigned NumRanges = Ranges.getNumOperands() / 2;
464   assert(NumRanges >= 1);
465 
466   Known.Zero.setAllBits();
467   Known.One.setAllBits();
468 
469   for (unsigned i = 0; i < NumRanges; ++i) {
470     ConstantInt *Lower =
471         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
472     ConstantInt *Upper =
473         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
474     ConstantRange Range(Lower->getValue(), Upper->getValue());
475 
476     // The first CommonPrefixBits of all values in Range are equal.
477     unsigned CommonPrefixBits =
478         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
479     APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
480     APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
481     Known.One &= UnsignedMax & Mask;
482     Known.Zero &= ~UnsignedMax & Mask;
483   }
484 }
485 
486 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
487   SmallVector<const Value *, 16> WorkSet(1, I);
488   SmallPtrSet<const Value *, 32> Visited;
489   SmallPtrSet<const Value *, 16> EphValues;
490 
491   // The instruction defining an assumption's condition itself is always
492   // considered ephemeral to that assumption (even if it has other
493   // non-ephemeral users). See r246696's test case for an example.
494   if (is_contained(I->operands(), E))
495     return true;
496 
497   while (!WorkSet.empty()) {
498     const Value *V = WorkSet.pop_back_val();
499     if (!Visited.insert(V).second)
500       continue;
501 
502     // If all uses of this value are ephemeral, then so is this value.
503     if (llvm::all_of(V->users(), [&](const User *U) {
504                                    return EphValues.count(U);
505                                  })) {
506       if (V == E)
507         return true;
508 
509       if (V == I || isSafeToSpeculativelyExecute(V)) {
510        EphValues.insert(V);
511        if (const User *U = dyn_cast<User>(V))
512          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
513               J != JE; ++J)
514            WorkSet.push_back(*J);
515       }
516     }
517   }
518 
519   return false;
520 }
521 
522 // Is this an intrinsic that cannot be speculated but also cannot trap?
523 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
524   if (const CallInst *CI = dyn_cast<CallInst>(I))
525     if (Function *F = CI->getCalledFunction())
526       switch (F->getIntrinsicID()) {
527       default: break;
528       // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
529       case Intrinsic::assume:
530       case Intrinsic::sideeffect:
531       case Intrinsic::pseudoprobe:
532       case Intrinsic::dbg_declare:
533       case Intrinsic::dbg_value:
534       case Intrinsic::dbg_label:
535       case Intrinsic::invariant_start:
536       case Intrinsic::invariant_end:
537       case Intrinsic::lifetime_start:
538       case Intrinsic::lifetime_end:
539       case Intrinsic::objectsize:
540       case Intrinsic::ptr_annotation:
541       case Intrinsic::var_annotation:
542         return true;
543       }
544 
545   return false;
546 }
547 
548 bool llvm::isValidAssumeForContext(const Instruction *Inv,
549                                    const Instruction *CxtI,
550                                    const DominatorTree *DT) {
551   // There are two restrictions on the use of an assume:
552   //  1. The assume must dominate the context (or the control flow must
553   //     reach the assume whenever it reaches the context).
554   //  2. The context must not be in the assume's set of ephemeral values
555   //     (otherwise we will use the assume to prove that the condition
556   //     feeding the assume is trivially true, thus causing the removal of
557   //     the assume).
558 
559   if (Inv->getParent() == CxtI->getParent()) {
560     // If Inv and CtxI are in the same block, check if the assume (Inv) is first
561     // in the BB.
562     if (Inv->comesBefore(CxtI))
563       return true;
564 
565     // Don't let an assume affect itself - this would cause the problems
566     // `isEphemeralValueOf` is trying to prevent, and it would also make
567     // the loop below go out of bounds.
568     if (Inv == CxtI)
569       return false;
570 
571     // The context comes first, but they're both in the same block.
572     // Make sure there is nothing in between that might interrupt
573     // the control flow, not even CxtI itself.
574     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
575       if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
576         return false;
577 
578     return !isEphemeralValueOf(Inv, CxtI);
579   }
580 
581   // Inv and CxtI are in different blocks.
582   if (DT) {
583     if (DT->dominates(Inv, CxtI))
584       return true;
585   } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
586     // We don't have a DT, but this trivially dominates.
587     return true;
588   }
589 
590   return false;
591 }
592 
593 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
594   // Use of assumptions is context-sensitive. If we don't have a context, we
595   // cannot use them!
596   if (!Q.AC || !Q.CxtI)
597     return false;
598 
599   // Note that the patterns below need to be kept in sync with the code
600   // in AssumptionCache::updateAffectedValues.
601 
602   auto CmpExcludesZero = [V](ICmpInst *Cmp) {
603     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
604 
605     Value *RHS;
606     CmpInst::Predicate Pred;
607     if (!match(Cmp, m_c_ICmp(Pred, m_V, m_Value(RHS))))
608       return false;
609     // assume(v u> y) -> assume(v != 0)
610     if (Pred == ICmpInst::ICMP_UGT)
611       return true;
612 
613     // assume(v != 0)
614     // We special-case this one to ensure that we handle `assume(v != null)`.
615     if (Pred == ICmpInst::ICMP_NE)
616       return match(RHS, m_Zero());
617 
618     // All other predicates - rely on generic ConstantRange handling.
619     ConstantInt *CI;
620     if (!match(RHS, m_ConstantInt(CI)))
621       return false;
622     ConstantRange RHSRange(CI->getValue());
623     ConstantRange TrueValues =
624         ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
625     return !TrueValues.contains(APInt::getNullValue(CI->getBitWidth()));
626   };
627 
628   if (Q.CxtI && V->getType()->isPointerTy()) {
629     SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
630     if (!NullPointerIsDefined(Q.CxtI->getFunction(),
631                               V->getType()->getPointerAddressSpace()))
632       AttrKinds.push_back(Attribute::Dereferenceable);
633 
634     if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
635       return true;
636   }
637 
638   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
639     if (!AssumeVH)
640       continue;
641     CallInst *I = cast<CallInst>(AssumeVH);
642     assert(I->getFunction() == Q.CxtI->getFunction() &&
643            "Got assumption for the wrong function!");
644     if (Q.isExcluded(I))
645       continue;
646 
647     // Warning: This loop can end up being somewhat performance sensitive.
648     // We're running this loop for once for each value queried resulting in a
649     // runtime of ~O(#assumes * #values).
650 
651     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
652            "must be an assume intrinsic");
653 
654     Value *Arg = I->getArgOperand(0);
655     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
656     if (!Cmp)
657       continue;
658 
659     if (CmpExcludesZero(Cmp) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
660       return true;
661   }
662 
663   return false;
664 }
665 
666 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
667                                        unsigned Depth, const Query &Q) {
668   // Use of assumptions is context-sensitive. If we don't have a context, we
669   // cannot use them!
670   if (!Q.AC || !Q.CxtI)
671     return;
672 
673   unsigned BitWidth = Known.getBitWidth();
674 
675   // Refine Known set if the pointer alignment is set by assume bundles.
676   if (V->getType()->isPointerTy()) {
677     if (RetainedKnowledge RK = getKnowledgeValidInContext(
678             V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
679       Known.Zero.setLowBits(Log2_32(RK.ArgValue));
680     }
681   }
682 
683   // Note that the patterns below need to be kept in sync with the code
684   // in AssumptionCache::updateAffectedValues.
685 
686   for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
687     if (!AssumeVH)
688       continue;
689     CallInst *I = cast<CallInst>(AssumeVH);
690     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
691            "Got assumption for the wrong function!");
692     if (Q.isExcluded(I))
693       continue;
694 
695     // Warning: This loop can end up being somewhat performance sensitive.
696     // We're running this loop for once for each value queried resulting in a
697     // runtime of ~O(#assumes * #values).
698 
699     assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
700            "must be an assume intrinsic");
701 
702     Value *Arg = I->getArgOperand(0);
703 
704     if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
705       assert(BitWidth == 1 && "assume operand is not i1?");
706       Known.setAllOnes();
707       return;
708     }
709     if (match(Arg, m_Not(m_Specific(V))) &&
710         isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
711       assert(BitWidth == 1 && "assume operand is not i1?");
712       Known.setAllZero();
713       return;
714     }
715 
716     // The remaining tests are all recursive, so bail out if we hit the limit.
717     if (Depth == MaxAnalysisRecursionDepth)
718       continue;
719 
720     ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
721     if (!Cmp)
722       continue;
723 
724     // Note that ptrtoint may change the bitwidth.
725     Value *A, *B;
726     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
727 
728     CmpInst::Predicate Pred;
729     uint64_t C;
730     switch (Cmp->getPredicate()) {
731     default:
732       break;
733     case ICmpInst::ICMP_EQ:
734       // assume(v = a)
735       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
736           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
737         KnownBits RHSKnown =
738             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
739         Known.Zero |= RHSKnown.Zero;
740         Known.One  |= RHSKnown.One;
741       // assume(v & b = a)
742       } else if (match(Cmp,
743                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
744                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
745         KnownBits RHSKnown =
746             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
747         KnownBits MaskKnown =
748             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
749 
750         // For those bits in the mask that are known to be one, we can propagate
751         // known bits from the RHS to V.
752         Known.Zero |= RHSKnown.Zero & MaskKnown.One;
753         Known.One  |= RHSKnown.One  & MaskKnown.One;
754       // assume(~(v & b) = a)
755       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
756                                      m_Value(A))) &&
757                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
758         KnownBits RHSKnown =
759             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
760         KnownBits MaskKnown =
761             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
762 
763         // For those bits in the mask that are known to be one, we can propagate
764         // inverted known bits from the RHS to V.
765         Known.Zero |= RHSKnown.One  & MaskKnown.One;
766         Known.One  |= RHSKnown.Zero & MaskKnown.One;
767       // assume(v | b = a)
768       } else if (match(Cmp,
769                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
770                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
771         KnownBits RHSKnown =
772             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
773         KnownBits BKnown =
774             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
775 
776         // For those bits in B that are known to be zero, we can propagate known
777         // bits from the RHS to V.
778         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
779         Known.One  |= RHSKnown.One  & BKnown.Zero;
780       // assume(~(v | b) = a)
781       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
782                                      m_Value(A))) &&
783                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
784         KnownBits RHSKnown =
785             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
786         KnownBits BKnown =
787             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
788 
789         // For those bits in B that are known to be zero, we can propagate
790         // inverted known bits from the RHS to V.
791         Known.Zero |= RHSKnown.One  & BKnown.Zero;
792         Known.One  |= RHSKnown.Zero & BKnown.Zero;
793       // assume(v ^ b = a)
794       } else if (match(Cmp,
795                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
796                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
797         KnownBits RHSKnown =
798             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
799         KnownBits BKnown =
800             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
801 
802         // For those bits in B that are known to be zero, we can propagate known
803         // bits from the RHS to V. For those bits in B that are known to be one,
804         // we can propagate inverted known bits from the RHS to V.
805         Known.Zero |= RHSKnown.Zero & BKnown.Zero;
806         Known.One  |= RHSKnown.One  & BKnown.Zero;
807         Known.Zero |= RHSKnown.One  & BKnown.One;
808         Known.One  |= RHSKnown.Zero & BKnown.One;
809       // assume(~(v ^ b) = a)
810       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
811                                      m_Value(A))) &&
812                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
813         KnownBits RHSKnown =
814             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
815         KnownBits BKnown =
816             computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
817 
818         // For those bits in B that are known to be zero, we can propagate
819         // inverted known bits from the RHS to V. For those bits in B that are
820         // known to be one, we can propagate known bits from the RHS to V.
821         Known.Zero |= RHSKnown.One  & BKnown.Zero;
822         Known.One  |= RHSKnown.Zero & BKnown.Zero;
823         Known.Zero |= RHSKnown.Zero & BKnown.One;
824         Known.One  |= RHSKnown.One  & BKnown.One;
825       // assume(v << c = a)
826       } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
827                                      m_Value(A))) &&
828                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
829         KnownBits RHSKnown =
830             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
831 
832         // For those bits in RHS that are known, we can propagate them to known
833         // bits in V shifted to the right by C.
834         RHSKnown.Zero.lshrInPlace(C);
835         Known.Zero |= RHSKnown.Zero;
836         RHSKnown.One.lshrInPlace(C);
837         Known.One  |= RHSKnown.One;
838       // assume(~(v << c) = a)
839       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
840                                      m_Value(A))) &&
841                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
842         KnownBits RHSKnown =
843             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
844         // For those bits in RHS that are known, we can propagate them inverted
845         // to known bits in V shifted to the right by C.
846         RHSKnown.One.lshrInPlace(C);
847         Known.Zero |= RHSKnown.One;
848         RHSKnown.Zero.lshrInPlace(C);
849         Known.One  |= RHSKnown.Zero;
850       // assume(v >> c = a)
851       } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
852                                      m_Value(A))) &&
853                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
854         KnownBits RHSKnown =
855             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
856         // For those bits in RHS that are known, we can propagate them to known
857         // bits in V shifted to the right by C.
858         Known.Zero |= RHSKnown.Zero << C;
859         Known.One  |= RHSKnown.One  << C;
860       // assume(~(v >> c) = a)
861       } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
862                                      m_Value(A))) &&
863                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
864         KnownBits RHSKnown =
865             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
866         // For those bits in RHS that are known, we can propagate them inverted
867         // to known bits in V shifted to the right by C.
868         Known.Zero |= RHSKnown.One  << C;
869         Known.One  |= RHSKnown.Zero << C;
870       }
871       break;
872     case ICmpInst::ICMP_SGE:
873       // assume(v >=_s c) where c is non-negative
874       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
875           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
876         KnownBits RHSKnown =
877             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
878 
879         if (RHSKnown.isNonNegative()) {
880           // We know that the sign bit is zero.
881           Known.makeNonNegative();
882         }
883       }
884       break;
885     case ICmpInst::ICMP_SGT:
886       // assume(v >_s c) where c is at least -1.
887       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
888           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
889         KnownBits RHSKnown =
890             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
891 
892         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
893           // We know that the sign bit is zero.
894           Known.makeNonNegative();
895         }
896       }
897       break;
898     case ICmpInst::ICMP_SLE:
899       // assume(v <=_s c) where c is negative
900       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
901           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
902         KnownBits RHSKnown =
903             computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
904 
905         if (RHSKnown.isNegative()) {
906           // We know that the sign bit is one.
907           Known.makeNegative();
908         }
909       }
910       break;
911     case ICmpInst::ICMP_SLT:
912       // assume(v <_s c) where c is non-positive
913       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
914           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
915         KnownBits RHSKnown =
916             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
917 
918         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
919           // We know that the sign bit is one.
920           Known.makeNegative();
921         }
922       }
923       break;
924     case ICmpInst::ICMP_ULE:
925       // assume(v <=_u c)
926       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
927           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
928         KnownBits RHSKnown =
929             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
930 
931         // Whatever high bits in c are zero are known to be zero.
932         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
933       }
934       break;
935     case ICmpInst::ICMP_ULT:
936       // assume(v <_u c)
937       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
938           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
939         KnownBits RHSKnown =
940             computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
941 
942         // If the RHS is known zero, then this assumption must be wrong (nothing
943         // is unsigned less than zero). Signal a conflict and get out of here.
944         if (RHSKnown.isZero()) {
945           Known.Zero.setAllBits();
946           Known.One.setAllBits();
947           break;
948         }
949 
950         // Whatever high bits in c are zero are known to be zero (if c is a power
951         // of 2, then one more).
952         if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
953           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
954         else
955           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
956       }
957       break;
958     }
959   }
960 
961   // If assumptions conflict with each other or previous known bits, then we
962   // have a logical fallacy. It's possible that the assumption is not reachable,
963   // so this isn't a real bug. On the other hand, the program may have undefined
964   // behavior, or we might have a bug in the compiler. We can't assert/crash, so
965   // clear out the known bits, try to warn the user, and hope for the best.
966   if (Known.Zero.intersects(Known.One)) {
967     Known.resetAll();
968 
969     if (Q.ORE)
970       Q.ORE->emit([&]() {
971         auto *CxtI = const_cast<Instruction *>(Q.CxtI);
972         return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
973                                           CxtI)
974                << "Detected conflicting code assumptions. Program may "
975                   "have undefined behavior, or compiler may have "
976                   "internal error.";
977       });
978   }
979 }
980 
981 /// Compute known bits from a shift operator, including those with a
982 /// non-constant shift amount. Known is the output of this function. Known2 is a
983 /// pre-allocated temporary with the same bit width as Known and on return
984 /// contains the known bit of the shift value source. KF is an
985 /// operator-specific function that, given the known-bits and a shift amount,
986 /// compute the implied known-bits of the shift operator's result respectively
987 /// for that shift amount. The results from calling KF are conservatively
988 /// combined for all permitted shift amounts.
989 static void computeKnownBitsFromShiftOperator(
990     const Operator *I, const APInt &DemandedElts, KnownBits &Known,
991     KnownBits &Known2, unsigned Depth, const Query &Q,
992     function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
993   unsigned BitWidth = Known.getBitWidth();
994   computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
995   computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
996 
997   // Note: We cannot use Known.Zero.getLimitedValue() here, because if
998   // BitWidth > 64 and any upper bits are known, we'll end up returning the
999   // limit value (which implies all bits are known).
1000   uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
1001   uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
1002   bool ShiftAmtIsConstant = Known.isConstant();
1003   bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
1004 
1005   if (ShiftAmtIsConstant) {
1006     Known = KF(Known2, Known);
1007 
1008     // If the known bits conflict, this must be an overflowing left shift, so
1009     // the shift result is poison. We can return anything we want. Choose 0 for
1010     // the best folding opportunity.
1011     if (Known.hasConflict())
1012       Known.setAllZero();
1013 
1014     return;
1015   }
1016 
1017   // If the shift amount could be greater than or equal to the bit-width of the
1018   // LHS, the value could be poison, but bail out because the check below is
1019   // expensive.
1020   // TODO: Should we just carry on?
1021   if (MaxShiftAmtIsOutOfRange) {
1022     Known.resetAll();
1023     return;
1024   }
1025 
1026   // It would be more-clearly correct to use the two temporaries for this
1027   // calculation. Reusing the APInts here to prevent unnecessary allocations.
1028   Known.resetAll();
1029 
1030   // If we know the shifter operand is nonzero, we can sometimes infer more
1031   // known bits. However this is expensive to compute, so be lazy about it and
1032   // only compute it when absolutely necessary.
1033   Optional<bool> ShifterOperandIsNonZero;
1034 
1035   // Early exit if we can't constrain any well-defined shift amount.
1036   if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1037       !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1038     ShifterOperandIsNonZero =
1039         isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1040     if (!*ShifterOperandIsNonZero)
1041       return;
1042   }
1043 
1044   Known.Zero.setAllBits();
1045   Known.One.setAllBits();
1046   for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1047     // Combine the shifted known input bits only for those shift amounts
1048     // compatible with its known constraints.
1049     if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1050       continue;
1051     if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1052       continue;
1053     // If we know the shifter is nonzero, we may be able to infer more known
1054     // bits. This check is sunk down as far as possible to avoid the expensive
1055     // call to isKnownNonZero if the cheaper checks above fail.
1056     if (ShiftAmt == 0) {
1057       if (!ShifterOperandIsNonZero.hasValue())
1058         ShifterOperandIsNonZero =
1059             isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1060       if (*ShifterOperandIsNonZero)
1061         continue;
1062     }
1063 
1064     Known = KnownBits::commonBits(
1065         Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1066   }
1067 
1068   // If the known bits conflict, the result is poison. Return a 0 and hope the
1069   // caller can further optimize that.
1070   if (Known.hasConflict())
1071     Known.setAllZero();
1072 }
1073 
1074 static void computeKnownBitsFromOperator(const Operator *I,
1075                                          const APInt &DemandedElts,
1076                                          KnownBits &Known, unsigned Depth,
1077                                          const Query &Q) {
1078   unsigned BitWidth = Known.getBitWidth();
1079 
1080   KnownBits Known2(BitWidth);
1081   switch (I->getOpcode()) {
1082   default: break;
1083   case Instruction::Load:
1084     if (MDNode *MD =
1085             Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1086       computeKnownBitsFromRangeMetadata(*MD, Known);
1087     break;
1088   case Instruction::And: {
1089     // If either the LHS or the RHS are Zero, the result is zero.
1090     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1091     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1092 
1093     Known &= Known2;
1094 
1095     // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1096     // here we handle the more general case of adding any odd number by
1097     // matching the form add(x, add(x, y)) where y is odd.
1098     // TODO: This could be generalized to clearing any bit set in y where the
1099     // following bit is known to be unset in y.
1100     Value *X = nullptr, *Y = nullptr;
1101     if (!Known.Zero[0] && !Known.One[0] &&
1102         match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1103       Known2.resetAll();
1104       computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1105       if (Known2.countMinTrailingOnes() > 0)
1106         Known.Zero.setBit(0);
1107     }
1108     break;
1109   }
1110   case Instruction::Or:
1111     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1112     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1113 
1114     Known |= Known2;
1115     break;
1116   case Instruction::Xor:
1117     computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1118     computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1119 
1120     Known ^= Known2;
1121     break;
1122   case Instruction::Mul: {
1123     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1124     computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1125                         Known, Known2, Depth, Q);
1126     break;
1127   }
1128   case Instruction::UDiv: {
1129     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1130     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1131     Known = KnownBits::udiv(Known, Known2);
1132     break;
1133   }
1134   case Instruction::Select: {
1135     const Value *LHS = nullptr, *RHS = nullptr;
1136     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1137     if (SelectPatternResult::isMinOrMax(SPF)) {
1138       computeKnownBits(RHS, Known, Depth + 1, Q);
1139       computeKnownBits(LHS, Known2, Depth + 1, Q);
1140       switch (SPF) {
1141       default:
1142         llvm_unreachable("Unhandled select pattern flavor!");
1143       case SPF_SMAX:
1144         Known = KnownBits::smax(Known, Known2);
1145         break;
1146       case SPF_SMIN:
1147         Known = KnownBits::smin(Known, Known2);
1148         break;
1149       case SPF_UMAX:
1150         Known = KnownBits::umax(Known, Known2);
1151         break;
1152       case SPF_UMIN:
1153         Known = KnownBits::umin(Known, Known2);
1154         break;
1155       }
1156       break;
1157     }
1158 
1159     computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1160     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1161 
1162     // Only known if known in both the LHS and RHS.
1163     Known = KnownBits::commonBits(Known, Known2);
1164 
1165     if (SPF == SPF_ABS) {
1166       // RHS from matchSelectPattern returns the negation part of abs pattern.
1167       // If the negate has an NSW flag we can assume the sign bit of the result
1168       // will be 0 because that makes abs(INT_MIN) undefined.
1169       if (match(RHS, m_Neg(m_Specific(LHS))) &&
1170           Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
1171         Known.Zero.setSignBit();
1172     }
1173 
1174     break;
1175   }
1176   case Instruction::FPTrunc:
1177   case Instruction::FPExt:
1178   case Instruction::FPToUI:
1179   case Instruction::FPToSI:
1180   case Instruction::SIToFP:
1181   case Instruction::UIToFP:
1182     break; // Can't work with floating point.
1183   case Instruction::PtrToInt:
1184   case Instruction::IntToPtr:
1185     // Fall through and handle them the same as zext/trunc.
1186     LLVM_FALLTHROUGH;
1187   case Instruction::ZExt:
1188   case Instruction::Trunc: {
1189     Type *SrcTy = I->getOperand(0)->getType();
1190 
1191     unsigned SrcBitWidth;
1192     // Note that we handle pointer operands here because of inttoptr/ptrtoint
1193     // which fall through here.
1194     Type *ScalarTy = SrcTy->getScalarType();
1195     SrcBitWidth = ScalarTy->isPointerTy() ?
1196       Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1197       Q.DL.getTypeSizeInBits(ScalarTy);
1198 
1199     assert(SrcBitWidth && "SrcBitWidth can't be zero");
1200     Known = Known.anyextOrTrunc(SrcBitWidth);
1201     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1202     Known = Known.zextOrTrunc(BitWidth);
1203     break;
1204   }
1205   case Instruction::BitCast: {
1206     Type *SrcTy = I->getOperand(0)->getType();
1207     if (SrcTy->isIntOrPtrTy() &&
1208         // TODO: For now, not handling conversions like:
1209         // (bitcast i64 %x to <2 x i32>)
1210         !I->getType()->isVectorTy()) {
1211       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1212       break;
1213     }
1214     break;
1215   }
1216   case Instruction::SExt: {
1217     // Compute the bits in the result that are not present in the input.
1218     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1219 
1220     Known = Known.trunc(SrcBitWidth);
1221     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1222     // If the sign bit of the input is known set or clear, then we know the
1223     // top bits of the result.
1224     Known = Known.sext(BitWidth);
1225     break;
1226   }
1227   case Instruction::Shl: {
1228     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1229     auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1230       KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1231       // If this shift has "nsw" keyword, then the result is either a poison
1232       // value or has the same sign bit as the first operand.
1233       if (NSW) {
1234         if (KnownVal.Zero.isSignBitSet())
1235           Result.Zero.setSignBit();
1236         if (KnownVal.One.isSignBitSet())
1237           Result.One.setSignBit();
1238       }
1239       return Result;
1240     };
1241     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1242                                       KF);
1243     break;
1244   }
1245   case Instruction::LShr: {
1246     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1247       return KnownBits::lshr(KnownVal, KnownAmt);
1248     };
1249     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1250                                       KF);
1251     break;
1252   }
1253   case Instruction::AShr: {
1254     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1255       return KnownBits::ashr(KnownVal, KnownAmt);
1256     };
1257     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1258                                       KF);
1259     break;
1260   }
1261   case Instruction::Sub: {
1262     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1263     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1264                            DemandedElts, Known, Known2, Depth, Q);
1265     break;
1266   }
1267   case Instruction::Add: {
1268     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1269     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1270                            DemandedElts, Known, Known2, Depth, Q);
1271     break;
1272   }
1273   case Instruction::SRem:
1274     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1275     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1276     Known = KnownBits::srem(Known, Known2);
1277     break;
1278 
1279   case Instruction::URem:
1280     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1281     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1282     Known = KnownBits::urem(Known, Known2);
1283     break;
1284   case Instruction::Alloca:
1285     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1286     break;
1287   case Instruction::GetElementPtr: {
1288     // Analyze all of the subscripts of this getelementptr instruction
1289     // to determine if we can prove known low zero bits.
1290     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1291     // Accumulate the constant indices in a separate variable
1292     // to minimize the number of calls to computeForAddSub.
1293     APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1294 
1295     gep_type_iterator GTI = gep_type_begin(I);
1296     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1297       // TrailZ can only become smaller, short-circuit if we hit zero.
1298       if (Known.isUnknown())
1299         break;
1300 
1301       Value *Index = I->getOperand(i);
1302 
1303       // Handle case when index is zero.
1304       Constant *CIndex = dyn_cast<Constant>(Index);
1305       if (CIndex && CIndex->isZeroValue())
1306         continue;
1307 
1308       if (StructType *STy = GTI.getStructTypeOrNull()) {
1309         // Handle struct member offset arithmetic.
1310 
1311         assert(CIndex &&
1312                "Access to structure field must be known at compile time");
1313 
1314         if (CIndex->getType()->isVectorTy())
1315           Index = CIndex->getSplatValue();
1316 
1317         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1318         const StructLayout *SL = Q.DL.getStructLayout(STy);
1319         uint64_t Offset = SL->getElementOffset(Idx);
1320         AccConstIndices += Offset;
1321         continue;
1322       }
1323 
1324       // Handle array index arithmetic.
1325       Type *IndexedTy = GTI.getIndexedType();
1326       if (!IndexedTy->isSized()) {
1327         Known.resetAll();
1328         break;
1329       }
1330 
1331       unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1332       KnownBits IndexBits(IndexBitWidth);
1333       computeKnownBits(Index, IndexBits, Depth + 1, Q);
1334       TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1335       uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1336       KnownBits ScalingFactor(IndexBitWidth);
1337       // Multiply by current sizeof type.
1338       // &A[i] == A + i * sizeof(*A[i]).
1339       if (IndexTypeSize.isScalable()) {
1340         // For scalable types the only thing we know about sizeof is
1341         // that this is a multiple of the minimum size.
1342         ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1343       } else if (IndexBits.isConstant()) {
1344         APInt IndexConst = IndexBits.getConstant();
1345         APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1346         IndexConst *= ScalingFactor;
1347         AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1348         continue;
1349       } else {
1350         ScalingFactor.Zero = ~TypeSizeInBytes;
1351         ScalingFactor.One = TypeSizeInBytes;
1352       }
1353       IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor);
1354 
1355       // If the offsets have a different width from the pointer, according
1356       // to the language reference we need to sign-extend or truncate them
1357       // to the width of the pointer.
1358       IndexBits = IndexBits.sextOrTrunc(BitWidth);
1359 
1360       // Note that inbounds does *not* guarantee nsw for the addition, as only
1361       // the offset is signed, while the base address is unsigned.
1362       Known = KnownBits::computeForAddSub(
1363           /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1364     }
1365     if (!Known.isUnknown() && !AccConstIndices.isNullValue()) {
1366       KnownBits Index(BitWidth);
1367       Index.Zero = ~AccConstIndices;
1368       Index.One = AccConstIndices;
1369       Known = KnownBits::computeForAddSub(
1370           /*Add=*/true, /*NSW=*/false, Known, Index);
1371     }
1372     break;
1373   }
1374   case Instruction::PHI: {
1375     const PHINode *P = cast<PHINode>(I);
1376     // Handle the case of a simple two-predecessor recurrence PHI.
1377     // There's a lot more that could theoretically be done here, but
1378     // this is sufficient to catch some interesting cases.
1379     if (P->getNumIncomingValues() == 2) {
1380       for (unsigned i = 0; i != 2; ++i) {
1381         Value *L = P->getIncomingValue(i);
1382         Value *R = P->getIncomingValue(!i);
1383         Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
1384         Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
1385         Operator *LU = dyn_cast<Operator>(L);
1386         if (!LU)
1387           continue;
1388         unsigned Opcode = LU->getOpcode();
1389         // Check for operations that have the property that if
1390         // both their operands have low zero bits, the result
1391         // will have low zero bits.
1392         if (Opcode == Instruction::Add ||
1393             Opcode == Instruction::Sub ||
1394             Opcode == Instruction::And ||
1395             Opcode == Instruction::Or ||
1396             Opcode == Instruction::Mul) {
1397           Value *LL = LU->getOperand(0);
1398           Value *LR = LU->getOperand(1);
1399           // Find a recurrence.
1400           if (LL == I)
1401             L = LR;
1402           else if (LR == I)
1403             L = LL;
1404           else
1405             continue; // Check for recurrence with L and R flipped.
1406 
1407           // Change the context instruction to the "edge" that flows into the
1408           // phi. This is important because that is where the value is actually
1409           // "evaluated" even though it is used later somewhere else. (see also
1410           // D69571).
1411           Query RecQ = Q;
1412 
1413           // Ok, we have a PHI of the form L op= R. Check for low
1414           // zero bits.
1415           RecQ.CxtI = RInst;
1416           computeKnownBits(R, Known2, Depth + 1, RecQ);
1417 
1418           // We need to take the minimum number of known bits
1419           KnownBits Known3(BitWidth);
1420           RecQ.CxtI = LInst;
1421           computeKnownBits(L, Known3, Depth + 1, RecQ);
1422 
1423           Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1424                                          Known3.countMinTrailingZeros()));
1425 
1426           auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1427           if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1428             // If initial value of recurrence is nonnegative, and we are adding
1429             // a nonnegative number with nsw, the result can only be nonnegative
1430             // or poison value regardless of the number of times we execute the
1431             // add in phi recurrence. If initial value is negative and we are
1432             // adding a negative number with nsw, the result can only be
1433             // negative or poison value. Similar arguments apply to sub and mul.
1434             //
1435             // (add non-negative, non-negative) --> non-negative
1436             // (add negative, negative) --> negative
1437             if (Opcode == Instruction::Add) {
1438               if (Known2.isNonNegative() && Known3.isNonNegative())
1439                 Known.makeNonNegative();
1440               else if (Known2.isNegative() && Known3.isNegative())
1441                 Known.makeNegative();
1442             }
1443 
1444             // (sub nsw non-negative, negative) --> non-negative
1445             // (sub nsw negative, non-negative) --> negative
1446             else if (Opcode == Instruction::Sub && LL == I) {
1447               if (Known2.isNonNegative() && Known3.isNegative())
1448                 Known.makeNonNegative();
1449               else if (Known2.isNegative() && Known3.isNonNegative())
1450                 Known.makeNegative();
1451             }
1452 
1453             // (mul nsw non-negative, non-negative) --> non-negative
1454             else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1455                      Known3.isNonNegative())
1456               Known.makeNonNegative();
1457           }
1458 
1459           break;
1460         }
1461       }
1462     }
1463 
1464     // Unreachable blocks may have zero-operand PHI nodes.
1465     if (P->getNumIncomingValues() == 0)
1466       break;
1467 
1468     // Otherwise take the unions of the known bit sets of the operands,
1469     // taking conservative care to avoid excessive recursion.
1470     if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1471       // Skip if every incoming value references to ourself.
1472       if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1473         break;
1474 
1475       Known.Zero.setAllBits();
1476       Known.One.setAllBits();
1477       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1478         Value *IncValue = P->getIncomingValue(u);
1479         // Skip direct self references.
1480         if (IncValue == P) continue;
1481 
1482         // Change the context instruction to the "edge" that flows into the
1483         // phi. This is important because that is where the value is actually
1484         // "evaluated" even though it is used later somewhere else. (see also
1485         // D69571).
1486         Query RecQ = Q;
1487         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1488 
1489         Known2 = KnownBits(BitWidth);
1490         // Recurse, but cap the recursion to one level, because we don't
1491         // want to waste time spinning around in loops.
1492         computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1493         Known = KnownBits::commonBits(Known, Known2);
1494         // If all bits have been ruled out, there's no need to check
1495         // more operands.
1496         if (Known.isUnknown())
1497           break;
1498       }
1499     }
1500     break;
1501   }
1502   case Instruction::Call:
1503   case Instruction::Invoke:
1504     // If range metadata is attached to this call, set known bits from that,
1505     // and then intersect with known bits based on other properties of the
1506     // function.
1507     if (MDNode *MD =
1508             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1509       computeKnownBitsFromRangeMetadata(*MD, Known);
1510     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1511       computeKnownBits(RV, Known2, Depth + 1, Q);
1512       Known.Zero |= Known2.Zero;
1513       Known.One |= Known2.One;
1514     }
1515     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1516       switch (II->getIntrinsicID()) {
1517       default: break;
1518       case Intrinsic::abs: {
1519         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1520         bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1521         Known = Known2.abs(IntMinIsPoison);
1522         break;
1523       }
1524       case Intrinsic::bitreverse:
1525         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1526         Known.Zero |= Known2.Zero.reverseBits();
1527         Known.One |= Known2.One.reverseBits();
1528         break;
1529       case Intrinsic::bswap:
1530         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1531         Known.Zero |= Known2.Zero.byteSwap();
1532         Known.One |= Known2.One.byteSwap();
1533         break;
1534       case Intrinsic::ctlz: {
1535         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1536         // If we have a known 1, its position is our upper bound.
1537         unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1538         // If this call is undefined for 0, the result will be less than 2^n.
1539         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1540           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1541         unsigned LowBits = Log2_32(PossibleLZ)+1;
1542         Known.Zero.setBitsFrom(LowBits);
1543         break;
1544       }
1545       case Intrinsic::cttz: {
1546         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1547         // If we have a known 1, its position is our upper bound.
1548         unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1549         // If this call is undefined for 0, the result will be less than 2^n.
1550         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1551           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1552         unsigned LowBits = Log2_32(PossibleTZ)+1;
1553         Known.Zero.setBitsFrom(LowBits);
1554         break;
1555       }
1556       case Intrinsic::ctpop: {
1557         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1558         // We can bound the space the count needs.  Also, bits known to be zero
1559         // can't contribute to the population.
1560         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1561         unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1562         Known.Zero.setBitsFrom(LowBits);
1563         // TODO: we could bound KnownOne using the lower bound on the number
1564         // of bits which might be set provided by popcnt KnownOne2.
1565         break;
1566       }
1567       case Intrinsic::fshr:
1568       case Intrinsic::fshl: {
1569         const APInt *SA;
1570         if (!match(I->getOperand(2), m_APInt(SA)))
1571           break;
1572 
1573         // Normalize to funnel shift left.
1574         uint64_t ShiftAmt = SA->urem(BitWidth);
1575         if (II->getIntrinsicID() == Intrinsic::fshr)
1576           ShiftAmt = BitWidth - ShiftAmt;
1577 
1578         KnownBits Known3(BitWidth);
1579         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1580         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1581 
1582         Known.Zero =
1583             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1584         Known.One =
1585             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1586         break;
1587       }
1588       case Intrinsic::uadd_sat:
1589       case Intrinsic::usub_sat: {
1590         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1591         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1592         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1593 
1594         // Add: Leading ones of either operand are preserved.
1595         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1596         // as leading zeros in the result.
1597         unsigned LeadingKnown;
1598         if (IsAdd)
1599           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1600                                   Known2.countMinLeadingOnes());
1601         else
1602           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1603                                   Known2.countMinLeadingOnes());
1604 
1605         Known = KnownBits::computeForAddSub(
1606             IsAdd, /* NSW */ false, Known, Known2);
1607 
1608         // We select between the operation result and all-ones/zero
1609         // respectively, so we can preserve known ones/zeros.
1610         if (IsAdd) {
1611           Known.One.setHighBits(LeadingKnown);
1612           Known.Zero.clearAllBits();
1613         } else {
1614           Known.Zero.setHighBits(LeadingKnown);
1615           Known.One.clearAllBits();
1616         }
1617         break;
1618       }
1619       case Intrinsic::umin:
1620         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1621         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1622         Known = KnownBits::umin(Known, Known2);
1623         break;
1624       case Intrinsic::umax:
1625         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1626         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1627         Known = KnownBits::umax(Known, Known2);
1628         break;
1629       case Intrinsic::smin:
1630         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1631         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1632         Known = KnownBits::smin(Known, Known2);
1633         break;
1634       case Intrinsic::smax:
1635         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1636         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1637         Known = KnownBits::smax(Known, Known2);
1638         break;
1639       case Intrinsic::x86_sse42_crc32_64_64:
1640         Known.Zero.setBitsFrom(32);
1641         break;
1642       }
1643     }
1644     break;
1645   case Instruction::ShuffleVector: {
1646     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1647     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1648     if (!Shuf) {
1649       Known.resetAll();
1650       return;
1651     }
1652     // For undef elements, we don't know anything about the common state of
1653     // the shuffle result.
1654     APInt DemandedLHS, DemandedRHS;
1655     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1656       Known.resetAll();
1657       return;
1658     }
1659     Known.One.setAllBits();
1660     Known.Zero.setAllBits();
1661     if (!!DemandedLHS) {
1662       const Value *LHS = Shuf->getOperand(0);
1663       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1664       // If we don't know any bits, early out.
1665       if (Known.isUnknown())
1666         break;
1667     }
1668     if (!!DemandedRHS) {
1669       const Value *RHS = Shuf->getOperand(1);
1670       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1671       Known = KnownBits::commonBits(Known, Known2);
1672     }
1673     break;
1674   }
1675   case Instruction::InsertElement: {
1676     const Value *Vec = I->getOperand(0);
1677     const Value *Elt = I->getOperand(1);
1678     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1679     // Early out if the index is non-constant or out-of-range.
1680     unsigned NumElts = DemandedElts.getBitWidth();
1681     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1682       Known.resetAll();
1683       return;
1684     }
1685     Known.One.setAllBits();
1686     Known.Zero.setAllBits();
1687     unsigned EltIdx = CIdx->getZExtValue();
1688     // Do we demand the inserted element?
1689     if (DemandedElts[EltIdx]) {
1690       computeKnownBits(Elt, Known, Depth + 1, Q);
1691       // If we don't know any bits, early out.
1692       if (Known.isUnknown())
1693         break;
1694     }
1695     // We don't need the base vector element that has been inserted.
1696     APInt DemandedVecElts = DemandedElts;
1697     DemandedVecElts.clearBit(EltIdx);
1698     if (!!DemandedVecElts) {
1699       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1700       Known = KnownBits::commonBits(Known, Known2);
1701     }
1702     break;
1703   }
1704   case Instruction::ExtractElement: {
1705     // Look through extract element. If the index is non-constant or
1706     // out-of-range demand all elements, otherwise just the extracted element.
1707     const Value *Vec = I->getOperand(0);
1708     const Value *Idx = I->getOperand(1);
1709     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1710     if (isa<ScalableVectorType>(Vec->getType())) {
1711       // FIXME: there's probably *something* we can do with scalable vectors
1712       Known.resetAll();
1713       break;
1714     }
1715     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1716     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1717     if (CIdx && CIdx->getValue().ult(NumElts))
1718       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1719     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1720     break;
1721   }
1722   case Instruction::ExtractValue:
1723     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1724       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1725       if (EVI->getNumIndices() != 1) break;
1726       if (EVI->getIndices()[0] == 0) {
1727         switch (II->getIntrinsicID()) {
1728         default: break;
1729         case Intrinsic::uadd_with_overflow:
1730         case Intrinsic::sadd_with_overflow:
1731           computeKnownBitsAddSub(true, II->getArgOperand(0),
1732                                  II->getArgOperand(1), false, DemandedElts,
1733                                  Known, Known2, Depth, Q);
1734           break;
1735         case Intrinsic::usub_with_overflow:
1736         case Intrinsic::ssub_with_overflow:
1737           computeKnownBitsAddSub(false, II->getArgOperand(0),
1738                                  II->getArgOperand(1), false, DemandedElts,
1739                                  Known, Known2, Depth, Q);
1740           break;
1741         case Intrinsic::umul_with_overflow:
1742         case Intrinsic::smul_with_overflow:
1743           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1744                               DemandedElts, Known, Known2, Depth, Q);
1745           break;
1746         }
1747       }
1748     }
1749     break;
1750   case Instruction::Freeze:
1751     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1752                                   Depth + 1))
1753       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1754     break;
1755   }
1756 }
1757 
1758 /// Determine which bits of V are known to be either zero or one and return
1759 /// them.
1760 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1761                            unsigned Depth, const Query &Q) {
1762   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1763   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1764   return Known;
1765 }
1766 
1767 /// Determine which bits of V are known to be either zero or one and return
1768 /// them.
1769 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1770   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1771   computeKnownBits(V, Known, Depth, Q);
1772   return Known;
1773 }
1774 
1775 /// Determine which bits of V are known to be either zero or one and return
1776 /// them in the Known bit set.
1777 ///
1778 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1779 /// we cannot optimize based on the assumption that it is zero without changing
1780 /// it to be an explicit zero.  If we don't change it to zero, other code could
1781 /// optimized based on the contradictory assumption that it is non-zero.
1782 /// Because instcombine aggressively folds operations with undef args anyway,
1783 /// this won't lose us code quality.
1784 ///
1785 /// This function is defined on values with integer type, values with pointer
1786 /// type, and vectors of integers.  In the case
1787 /// where V is a vector, known zero, and known one values are the
1788 /// same width as the vector element, and the bit is set only if it is true
1789 /// for all of the demanded elements in the vector specified by DemandedElts.
1790 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1791                       KnownBits &Known, unsigned Depth, const Query &Q) {
1792   if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1793     // No demanded elts or V is a scalable vector, better to assume we don't
1794     // know anything.
1795     Known.resetAll();
1796     return;
1797   }
1798 
1799   assert(V && "No Value?");
1800   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1801 
1802 #ifndef NDEBUG
1803   Type *Ty = V->getType();
1804   unsigned BitWidth = Known.getBitWidth();
1805 
1806   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1807          "Not integer or pointer type!");
1808 
1809   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1810     assert(
1811         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1812         "DemandedElt width should equal the fixed vector number of elements");
1813   } else {
1814     assert(DemandedElts == APInt(1, 1) &&
1815            "DemandedElt width should be 1 for scalars");
1816   }
1817 
1818   Type *ScalarTy = Ty->getScalarType();
1819   if (ScalarTy->isPointerTy()) {
1820     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1821            "V and Known should have same BitWidth");
1822   } else {
1823     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1824            "V and Known should have same BitWidth");
1825   }
1826 #endif
1827 
1828   const APInt *C;
1829   if (match(V, m_APInt(C))) {
1830     // We know all of the bits for a scalar constant or a splat vector constant!
1831     Known.One = *C;
1832     Known.Zero = ~Known.One;
1833     return;
1834   }
1835   // Null and aggregate-zero are all-zeros.
1836   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1837     Known.setAllZero();
1838     return;
1839   }
1840   // Handle a constant vector by taking the intersection of the known bits of
1841   // each element.
1842   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1843     // We know that CDV must be a vector of integers. Take the intersection of
1844     // each element.
1845     Known.Zero.setAllBits(); Known.One.setAllBits();
1846     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1847       if (!DemandedElts[i])
1848         continue;
1849       APInt Elt = CDV->getElementAsAPInt(i);
1850       Known.Zero &= ~Elt;
1851       Known.One &= Elt;
1852     }
1853     return;
1854   }
1855 
1856   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1857     // We know that CV must be a vector of integers. Take the intersection of
1858     // each element.
1859     Known.Zero.setAllBits(); Known.One.setAllBits();
1860     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1861       if (!DemandedElts[i])
1862         continue;
1863       Constant *Element = CV->getAggregateElement(i);
1864       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1865       if (!ElementCI) {
1866         Known.resetAll();
1867         return;
1868       }
1869       const APInt &Elt = ElementCI->getValue();
1870       Known.Zero &= ~Elt;
1871       Known.One &= Elt;
1872     }
1873     return;
1874   }
1875 
1876   // Start out not knowing anything.
1877   Known.resetAll();
1878 
1879   // We can't imply anything about undefs.
1880   if (isa<UndefValue>(V))
1881     return;
1882 
1883   // There's no point in looking through other users of ConstantData for
1884   // assumptions.  Confirm that we've handled them all.
1885   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1886 
1887   // All recursive calls that increase depth must come after this.
1888   if (Depth == MaxAnalysisRecursionDepth)
1889     return;
1890 
1891   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1892   // the bits of its aliasee.
1893   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1894     if (!GA->isInterposable())
1895       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1896     return;
1897   }
1898 
1899   if (const Operator *I = dyn_cast<Operator>(V))
1900     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
1901 
1902   // Aligned pointers have trailing zeros - refine Known.Zero set
1903   if (isa<PointerType>(V->getType())) {
1904     Align Alignment = V->getPointerAlignment(Q.DL);
1905     Known.Zero.setLowBits(Log2(Alignment));
1906   }
1907 
1908   // computeKnownBitsFromAssume strictly refines Known.
1909   // Therefore, we run them after computeKnownBitsFromOperator.
1910 
1911   // Check whether a nearby assume intrinsic can determine some known bits.
1912   computeKnownBitsFromAssume(V, Known, Depth, Q);
1913 
1914   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1915 }
1916 
1917 /// Return true if the given value is known to have exactly one
1918 /// bit set when defined. For vectors return true if every element is known to
1919 /// be a power of two when defined. Supports values with integer or pointer
1920 /// types and vectors of integers.
1921 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1922                             const Query &Q) {
1923   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1924 
1925   // Attempt to match against constants.
1926   if (OrZero && match(V, m_Power2OrZero()))
1927       return true;
1928   if (match(V, m_Power2()))
1929       return true;
1930 
1931   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
1932   // it is shifted off the end then the result is undefined.
1933   if (match(V, m_Shl(m_One(), m_Value())))
1934     return true;
1935 
1936   // (signmask) >>l X is clearly a power of two if the one is not shifted off
1937   // the bottom.  If it is shifted off the bottom then the result is undefined.
1938   if (match(V, m_LShr(m_SignMask(), m_Value())))
1939     return true;
1940 
1941   // The remaining tests are all recursive, so bail out if we hit the limit.
1942   if (Depth++ == MaxAnalysisRecursionDepth)
1943     return false;
1944 
1945   Value *X = nullptr, *Y = nullptr;
1946   // A shift left or a logical shift right of a power of two is a power of two
1947   // or zero.
1948   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1949                  match(V, m_LShr(m_Value(X), m_Value()))))
1950     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1951 
1952   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1953     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1954 
1955   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1956     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1957            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1958 
1959   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1960     // A power of two and'd with anything is a power of two or zero.
1961     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1962         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1963       return true;
1964     // X & (-X) is always a power of two or zero.
1965     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1966       return true;
1967     return false;
1968   }
1969 
1970   // Adding a power-of-two or zero to the same power-of-two or zero yields
1971   // either the original power-of-two, a larger power-of-two or zero.
1972   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1973     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1974     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1975         Q.IIQ.hasNoSignedWrap(VOBO)) {
1976       if (match(X, m_And(m_Specific(Y), m_Value())) ||
1977           match(X, m_And(m_Value(), m_Specific(Y))))
1978         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1979           return true;
1980       if (match(Y, m_And(m_Specific(X), m_Value())) ||
1981           match(Y, m_And(m_Value(), m_Specific(X))))
1982         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1983           return true;
1984 
1985       unsigned BitWidth = V->getType()->getScalarSizeInBits();
1986       KnownBits LHSBits(BitWidth);
1987       computeKnownBits(X, LHSBits, Depth, Q);
1988 
1989       KnownBits RHSBits(BitWidth);
1990       computeKnownBits(Y, RHSBits, Depth, Q);
1991       // If i8 V is a power of two or zero:
1992       //  ZeroBits: 1 1 1 0 1 1 1 1
1993       // ~ZeroBits: 0 0 0 1 0 0 0 0
1994       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1995         // If OrZero isn't set, we cannot give back a zero result.
1996         // Make sure either the LHS or RHS has a bit set.
1997         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1998           return true;
1999     }
2000   }
2001 
2002   // An exact divide or right shift can only shift off zero bits, so the result
2003   // is a power of two only if the first operand is a power of two and not
2004   // copying a sign bit (sdiv int_min, 2).
2005   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2006       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2007     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2008                                   Depth, Q);
2009   }
2010 
2011   return false;
2012 }
2013 
2014 /// Test whether a GEP's result is known to be non-null.
2015 ///
2016 /// Uses properties inherent in a GEP to try to determine whether it is known
2017 /// to be non-null.
2018 ///
2019 /// Currently this routine does not support vector GEPs.
2020 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2021                               const Query &Q) {
2022   const Function *F = nullptr;
2023   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2024     F = I->getFunction();
2025 
2026   if (!GEP->isInBounds() ||
2027       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2028     return false;
2029 
2030   // FIXME: Support vector-GEPs.
2031   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2032 
2033   // If the base pointer is non-null, we cannot walk to a null address with an
2034   // inbounds GEP in address space zero.
2035   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2036     return true;
2037 
2038   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2039   // If so, then the GEP cannot produce a null pointer, as doing so would
2040   // inherently violate the inbounds contract within address space zero.
2041   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2042        GTI != GTE; ++GTI) {
2043     // Struct types are easy -- they must always be indexed by a constant.
2044     if (StructType *STy = GTI.getStructTypeOrNull()) {
2045       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2046       unsigned ElementIdx = OpC->getZExtValue();
2047       const StructLayout *SL = Q.DL.getStructLayout(STy);
2048       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2049       if (ElementOffset > 0)
2050         return true;
2051       continue;
2052     }
2053 
2054     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2055     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2056       continue;
2057 
2058     // Fast path the constant operand case both for efficiency and so we don't
2059     // increment Depth when just zipping down an all-constant GEP.
2060     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2061       if (!OpC->isZero())
2062         return true;
2063       continue;
2064     }
2065 
2066     // We post-increment Depth here because while isKnownNonZero increments it
2067     // as well, when we pop back up that increment won't persist. We don't want
2068     // to recurse 10k times just because we have 10k GEP operands. We don't
2069     // bail completely out because we want to handle constant GEPs regardless
2070     // of depth.
2071     if (Depth++ >= MaxAnalysisRecursionDepth)
2072       continue;
2073 
2074     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2075       return true;
2076   }
2077 
2078   return false;
2079 }
2080 
2081 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2082                                                   const Instruction *CtxI,
2083                                                   const DominatorTree *DT) {
2084   if (isa<Constant>(V))
2085     return false;
2086 
2087   if (!CtxI || !DT)
2088     return false;
2089 
2090   unsigned NumUsesExplored = 0;
2091   for (auto *U : V->users()) {
2092     // Avoid massive lists
2093     if (NumUsesExplored >= DomConditionsMaxUses)
2094       break;
2095     NumUsesExplored++;
2096 
2097     // If the value is used as an argument to a call or invoke, then argument
2098     // attributes may provide an answer about null-ness.
2099     if (const auto *CB = dyn_cast<CallBase>(U))
2100       if (auto *CalledFunc = CB->getCalledFunction())
2101         for (const Argument &Arg : CalledFunc->args())
2102           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2103               Arg.hasNonNullAttr() && DT->dominates(CB, CtxI))
2104             return true;
2105 
2106     // If the value is used as a load/store, then the pointer must be non null.
2107     if (V == getLoadStorePointerOperand(U)) {
2108       const Instruction *I = cast<Instruction>(U);
2109       if (!NullPointerIsDefined(I->getFunction(),
2110                                 V->getType()->getPointerAddressSpace()) &&
2111           DT->dominates(I, CtxI))
2112         return true;
2113     }
2114 
2115     // Consider only compare instructions uniquely controlling a branch
2116     CmpInst::Predicate Pred;
2117     if (!match(const_cast<User *>(U),
2118                m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
2119         (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
2120       continue;
2121 
2122     SmallVector<const User *, 4> WorkList;
2123     SmallPtrSet<const User *, 4> Visited;
2124     for (auto *CmpU : U->users()) {
2125       assert(WorkList.empty() && "Should be!");
2126       if (Visited.insert(CmpU).second)
2127         WorkList.push_back(CmpU);
2128 
2129       while (!WorkList.empty()) {
2130         auto *Curr = WorkList.pop_back_val();
2131 
2132         // If a user is an AND, add all its users to the work list. We only
2133         // propagate "pred != null" condition through AND because it is only
2134         // correct to assume that all conditions of AND are met in true branch.
2135         // TODO: Support similar logic of OR and EQ predicate?
2136         if (Pred == ICmpInst::ICMP_NE)
2137           if (auto *BO = dyn_cast<BinaryOperator>(Curr))
2138             if (BO->getOpcode() == Instruction::And) {
2139               for (auto *BOU : BO->users())
2140                 if (Visited.insert(BOU).second)
2141                   WorkList.push_back(BOU);
2142               continue;
2143             }
2144 
2145         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2146           assert(BI->isConditional() && "uses a comparison!");
2147 
2148           BasicBlock *NonNullSuccessor =
2149               BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
2150           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2151           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2152             return true;
2153         } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
2154                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2155           return true;
2156         }
2157       }
2158     }
2159   }
2160 
2161   return false;
2162 }
2163 
2164 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2165 /// ensure that the value it's attached to is never Value?  'RangeType' is
2166 /// is the type of the value described by the range.
2167 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2168   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2169   assert(NumRanges >= 1);
2170   for (unsigned i = 0; i < NumRanges; ++i) {
2171     ConstantInt *Lower =
2172         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2173     ConstantInt *Upper =
2174         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2175     ConstantRange Range(Lower->getValue(), Upper->getValue());
2176     if (Range.contains(Value))
2177       return false;
2178   }
2179   return true;
2180 }
2181 
2182 /// Return true if the given value is known to be non-zero when defined. For
2183 /// vectors, return true if every demanded element is known to be non-zero when
2184 /// defined. For pointers, if the context instruction and dominator tree are
2185 /// specified, perform context-sensitive analysis and return true if the
2186 /// pointer couldn't possibly be null at the specified instruction.
2187 /// Supports values with integer or pointer type and vectors of integers.
2188 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2189                     const Query &Q) {
2190   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2191   // vector
2192   if (isa<ScalableVectorType>(V->getType()))
2193     return false;
2194 
2195   if (auto *C = dyn_cast<Constant>(V)) {
2196     if (C->isNullValue())
2197       return false;
2198     if (isa<ConstantInt>(C))
2199       // Must be non-zero due to null test above.
2200       return true;
2201 
2202     if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2203       // See the comment for IntToPtr/PtrToInt instructions below.
2204       if (CE->getOpcode() == Instruction::IntToPtr ||
2205           CE->getOpcode() == Instruction::PtrToInt)
2206         if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2207                 .getFixedSize() <=
2208             Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2209           return isKnownNonZero(CE->getOperand(0), Depth, Q);
2210     }
2211 
2212     // For constant vectors, check that all elements are undefined or known
2213     // non-zero to determine that the whole vector is known non-zero.
2214     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2215       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2216         if (!DemandedElts[i])
2217           continue;
2218         Constant *Elt = C->getAggregateElement(i);
2219         if (!Elt || Elt->isNullValue())
2220           return false;
2221         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2222           return false;
2223       }
2224       return true;
2225     }
2226 
2227     // A global variable in address space 0 is non null unless extern weak
2228     // or an absolute symbol reference. Other address spaces may have null as a
2229     // valid address for a global, so we can't assume anything.
2230     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2231       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2232           GV->getType()->getAddressSpace() == 0)
2233         return true;
2234     } else
2235       return false;
2236   }
2237 
2238   if (auto *I = dyn_cast<Instruction>(V)) {
2239     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2240       // If the possible ranges don't contain zero, then the value is
2241       // definitely non-zero.
2242       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2243         const APInt ZeroValue(Ty->getBitWidth(), 0);
2244         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2245           return true;
2246       }
2247     }
2248   }
2249 
2250   if (isKnownNonZeroFromAssume(V, Q))
2251     return true;
2252 
2253   // Some of the tests below are recursive, so bail out if we hit the limit.
2254   if (Depth++ >= MaxAnalysisRecursionDepth)
2255     return false;
2256 
2257   // Check for pointer simplifications.
2258 
2259   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2260     // Alloca never returns null, malloc might.
2261     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2262       return true;
2263 
2264     // A byval, inalloca may not be null in a non-default addres space. A
2265     // nonnull argument is assumed never 0.
2266     if (const Argument *A = dyn_cast<Argument>(V)) {
2267       if (((A->hasPassPointeeByValueCopyAttr() &&
2268             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2269            A->hasNonNullAttr()))
2270         return true;
2271     }
2272 
2273     // A Load tagged with nonnull metadata is never null.
2274     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2275       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2276         return true;
2277 
2278     if (const auto *Call = dyn_cast<CallBase>(V)) {
2279       if (Call->isReturnNonNull())
2280         return true;
2281       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2282         return isKnownNonZero(RP, Depth, Q);
2283     }
2284   }
2285 
2286   if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2287     return true;
2288 
2289   // Check for recursive pointer simplifications.
2290   if (V->getType()->isPointerTy()) {
2291     // Look through bitcast operations, GEPs, and int2ptr instructions as they
2292     // do not alter the value, or at least not the nullness property of the
2293     // value, e.g., int2ptr is allowed to zero/sign extend the value.
2294     //
2295     // Note that we have to take special care to avoid looking through
2296     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2297     // as casts that can alter the value, e.g., AddrSpaceCasts.
2298     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2299       return isGEPKnownNonNull(GEP, Depth, Q);
2300 
2301     if (auto *BCO = dyn_cast<BitCastOperator>(V))
2302       return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2303 
2304     if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2305       if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2306           Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2307         return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2308   }
2309 
2310   // Similar to int2ptr above, we can look through ptr2int here if the cast
2311   // is a no-op or an extend and not a truncate.
2312   if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2313     if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2314         Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2315       return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2316 
2317   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2318 
2319   // X | Y != 0 if X != 0 or Y != 0.
2320   Value *X = nullptr, *Y = nullptr;
2321   if (match(V, m_Or(m_Value(X), m_Value(Y))))
2322     return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2323            isKnownNonZero(Y, DemandedElts, Depth, Q);
2324 
2325   // ext X != 0 if X != 0.
2326   if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2327     return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2328 
2329   // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2330   // if the lowest bit is shifted off the end.
2331   if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2332     // shl nuw can't remove any non-zero bits.
2333     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2334     if (Q.IIQ.hasNoUnsignedWrap(BO))
2335       return isKnownNonZero(X, Depth, Q);
2336 
2337     KnownBits Known(BitWidth);
2338     computeKnownBits(X, DemandedElts, Known, Depth, Q);
2339     if (Known.One[0])
2340       return true;
2341   }
2342   // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2343   // defined if the sign bit is shifted off the end.
2344   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2345     // shr exact can only shift out zero bits.
2346     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2347     if (BO->isExact())
2348       return isKnownNonZero(X, Depth, Q);
2349 
2350     KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2351     if (Known.isNegative())
2352       return true;
2353 
2354     // If the shifter operand is a constant, and all of the bits shifted
2355     // out are known to be zero, and X is known non-zero then at least one
2356     // non-zero bit must remain.
2357     if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2358       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2359       // Is there a known one in the portion not shifted out?
2360       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2361         return true;
2362       // Are all the bits to be shifted out known zero?
2363       if (Known.countMinTrailingZeros() >= ShiftVal)
2364         return isKnownNonZero(X, DemandedElts, Depth, Q);
2365     }
2366   }
2367   // div exact can only produce a zero if the dividend is zero.
2368   else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2369     return isKnownNonZero(X, DemandedElts, Depth, Q);
2370   }
2371   // X + Y.
2372   else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2373     KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2374     KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2375 
2376     // If X and Y are both non-negative (as signed values) then their sum is not
2377     // zero unless both X and Y are zero.
2378     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2379       if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2380           isKnownNonZero(Y, DemandedElts, Depth, Q))
2381         return true;
2382 
2383     // If X and Y are both negative (as signed values) then their sum is not
2384     // zero unless both X and Y equal INT_MIN.
2385     if (XKnown.isNegative() && YKnown.isNegative()) {
2386       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2387       // The sign bit of X is set.  If some other bit is set then X is not equal
2388       // to INT_MIN.
2389       if (XKnown.One.intersects(Mask))
2390         return true;
2391       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2392       // to INT_MIN.
2393       if (YKnown.One.intersects(Mask))
2394         return true;
2395     }
2396 
2397     // The sum of a non-negative number and a power of two is not zero.
2398     if (XKnown.isNonNegative() &&
2399         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2400       return true;
2401     if (YKnown.isNonNegative() &&
2402         isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2403       return true;
2404   }
2405   // X * Y.
2406   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2407     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2408     // If X and Y are non-zero then so is X * Y as long as the multiplication
2409     // does not overflow.
2410     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2411         isKnownNonZero(X, DemandedElts, Depth, Q) &&
2412         isKnownNonZero(Y, DemandedElts, Depth, Q))
2413       return true;
2414   }
2415   // (C ? X : Y) != 0 if X != 0 and Y != 0.
2416   else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2417     if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2418         isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2419       return true;
2420   }
2421   // PHI
2422   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2423     // Try and detect a recurrence that monotonically increases from a
2424     // starting value, as these are common as induction variables.
2425     if (PN->getNumIncomingValues() == 2) {
2426       Value *Start = PN->getIncomingValue(0);
2427       Value *Induction = PN->getIncomingValue(1);
2428       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2429         std::swap(Start, Induction);
2430       if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2431         if (!C->isZero() && !C->isNegative()) {
2432           ConstantInt *X;
2433           if (Q.IIQ.UseInstrInfo &&
2434               (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2435                match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2436               !X->isNegative())
2437             return true;
2438         }
2439       }
2440     }
2441     // Check if all incoming values are non-zero using recursion.
2442     Query RecQ = Q;
2443     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2444     return llvm::all_of(PN->operands(), [&](const Use &U) {
2445       if (U.get() == PN)
2446         return true;
2447       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2448       return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2449     });
2450   }
2451   // ExtractElement
2452   else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2453     const Value *Vec = EEI->getVectorOperand();
2454     const Value *Idx = EEI->getIndexOperand();
2455     auto *CIdx = dyn_cast<ConstantInt>(Idx);
2456     if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2457       unsigned NumElts = VecTy->getNumElements();
2458       APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
2459       if (CIdx && CIdx->getValue().ult(NumElts))
2460         DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2461       return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2462     }
2463   }
2464   // Freeze
2465   else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2466     auto *Op = FI->getOperand(0);
2467     if (isKnownNonZero(Op, Depth, Q) &&
2468         isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2469       return true;
2470   }
2471 
2472   KnownBits Known(BitWidth);
2473   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2474   return Known.One != 0;
2475 }
2476 
2477 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2478   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2479   // vector
2480   if (isa<ScalableVectorType>(V->getType()))
2481     return false;
2482 
2483   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2484   APInt DemandedElts =
2485       FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
2486   return isKnownNonZero(V, DemandedElts, Depth, Q);
2487 }
2488 
2489 /// Return true if V2 == V1 + X, where X is known non-zero.
2490 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2491                            const Query &Q) {
2492   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2493   if (!BO || BO->getOpcode() != Instruction::Add)
2494     return false;
2495   Value *Op = nullptr;
2496   if (V2 == BO->getOperand(0))
2497     Op = BO->getOperand(1);
2498   else if (V2 == BO->getOperand(1))
2499     Op = BO->getOperand(0);
2500   else
2501     return false;
2502   return isKnownNonZero(Op, Depth + 1, Q);
2503 }
2504 
2505 /// Return true if it is known that V1 != V2.
2506 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2507                             const Query &Q) {
2508   if (V1 == V2)
2509     return false;
2510   if (V1->getType() != V2->getType())
2511     // We can't look through casts yet.
2512     return false;
2513 
2514   if (Depth >= MaxAnalysisRecursionDepth)
2515     return false;
2516 
2517   // See if we can recurse through (exactly one of) our operands.
2518   auto *O1 = dyn_cast<Operator>(V1);
2519   auto *O2 = dyn_cast<Operator>(V2);
2520   if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2521     switch (O1->getOpcode()) {
2522     default: break;
2523     case Instruction::Add:
2524     case Instruction::Sub:
2525       // Assume operand order has been canonicalized
2526       if (O1->getOperand(0) == O2->getOperand(0))
2527         return isKnownNonEqual(O1->getOperand(1), O2->getOperand(1),
2528                                Depth + 1, Q);
2529       if (O1->getOperand(1) == O2->getOperand(1))
2530         return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2531                                Depth + 1, Q);
2532       break;
2533     case Instruction::SExt:
2534     case Instruction::ZExt:
2535       if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType())
2536         return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
2537                                Depth + 1, Q);
2538       break;
2539     };
2540   }
2541 
2542   if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2543     return true;
2544 
2545   if (V1->getType()->isIntOrIntVectorTy()) {
2546     // Are any known bits in V1 contradictory to known bits in V2? If V1
2547     // has a known zero where V2 has a known one, they must not be equal.
2548     KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2549     KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2550 
2551     if (Known1.Zero.intersects(Known2.One) ||
2552         Known2.Zero.intersects(Known1.One))
2553       return true;
2554   }
2555   return false;
2556 }
2557 
2558 /// Return true if 'V & Mask' is known to be zero.  We use this predicate to
2559 /// simplify operations downstream. Mask is known to be zero for bits that V
2560 /// cannot have.
2561 ///
2562 /// This function is defined on values with integer type, values with pointer
2563 /// type, and vectors of integers.  In the case
2564 /// where V is a vector, the mask, known zero, and known one values are the
2565 /// same width as the vector element, and the bit is set only if it is true
2566 /// for all of the elements in the vector.
2567 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2568                        const Query &Q) {
2569   KnownBits Known(Mask.getBitWidth());
2570   computeKnownBits(V, Known, Depth, Q);
2571   return Mask.isSubsetOf(Known.Zero);
2572 }
2573 
2574 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2575 // Returns the input and lower/upper bounds.
2576 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2577                                 const APInt *&CLow, const APInt *&CHigh) {
2578   assert(isa<Operator>(Select) &&
2579          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2580          "Input should be a Select!");
2581 
2582   const Value *LHS = nullptr, *RHS = nullptr;
2583   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2584   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2585     return false;
2586 
2587   if (!match(RHS, m_APInt(CLow)))
2588     return false;
2589 
2590   const Value *LHS2 = nullptr, *RHS2 = nullptr;
2591   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2592   if (getInverseMinMaxFlavor(SPF) != SPF2)
2593     return false;
2594 
2595   if (!match(RHS2, m_APInt(CHigh)))
2596     return false;
2597 
2598   if (SPF == SPF_SMIN)
2599     std::swap(CLow, CHigh);
2600 
2601   In = LHS2;
2602   return CLow->sle(*CHigh);
2603 }
2604 
2605 /// For vector constants, loop over the elements and find the constant with the
2606 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2607 /// or if any element was not analyzed; otherwise, return the count for the
2608 /// element with the minimum number of sign bits.
2609 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2610                                                  const APInt &DemandedElts,
2611                                                  unsigned TyBits) {
2612   const auto *CV = dyn_cast<Constant>(V);
2613   if (!CV || !isa<FixedVectorType>(CV->getType()))
2614     return 0;
2615 
2616   unsigned MinSignBits = TyBits;
2617   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
2618   for (unsigned i = 0; i != NumElts; ++i) {
2619     if (!DemandedElts[i])
2620       continue;
2621     // If we find a non-ConstantInt, bail out.
2622     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2623     if (!Elt)
2624       return 0;
2625 
2626     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2627   }
2628 
2629   return MinSignBits;
2630 }
2631 
2632 static unsigned ComputeNumSignBitsImpl(const Value *V,
2633                                        const APInt &DemandedElts,
2634                                        unsigned Depth, const Query &Q);
2635 
2636 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
2637                                    unsigned Depth, const Query &Q) {
2638   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
2639   assert(Result > 0 && "At least one sign bit needs to be present!");
2640   return Result;
2641 }
2642 
2643 /// Return the number of times the sign bit of the register is replicated into
2644 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2645 /// (itself), but other cases can give us information. For example, immediately
2646 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2647 /// other, so we return 3. For vectors, return the number of sign bits for the
2648 /// vector element with the minimum number of known sign bits of the demanded
2649 /// elements in the vector specified by DemandedElts.
2650 static unsigned ComputeNumSignBitsImpl(const Value *V,
2651                                        const APInt &DemandedElts,
2652                                        unsigned Depth, const Query &Q) {
2653   Type *Ty = V->getType();
2654 
2655   // FIXME: We currently have no way to represent the DemandedElts of a scalable
2656   // vector
2657   if (isa<ScalableVectorType>(Ty))
2658     return 1;
2659 
2660 #ifndef NDEBUG
2661   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2662 
2663   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2664     assert(
2665         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2666         "DemandedElt width should equal the fixed vector number of elements");
2667   } else {
2668     assert(DemandedElts == APInt(1, 1) &&
2669            "DemandedElt width should be 1 for scalars");
2670   }
2671 #endif
2672 
2673   // We return the minimum number of sign bits that are guaranteed to be present
2674   // in V, so for undef we have to conservatively return 1.  We don't have the
2675   // same behavior for poison though -- that's a FIXME today.
2676 
2677   Type *ScalarTy = Ty->getScalarType();
2678   unsigned TyBits = ScalarTy->isPointerTy() ?
2679     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2680     Q.DL.getTypeSizeInBits(ScalarTy);
2681 
2682   unsigned Tmp, Tmp2;
2683   unsigned FirstAnswer = 1;
2684 
2685   // Note that ConstantInt is handled by the general computeKnownBits case
2686   // below.
2687 
2688   if (Depth == MaxAnalysisRecursionDepth)
2689     return 1;
2690 
2691   if (auto *U = dyn_cast<Operator>(V)) {
2692     switch (Operator::getOpcode(V)) {
2693     default: break;
2694     case Instruction::SExt:
2695       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2696       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2697 
2698     case Instruction::SDiv: {
2699       const APInt *Denominator;
2700       // sdiv X, C -> adds log(C) sign bits.
2701       if (match(U->getOperand(1), m_APInt(Denominator))) {
2702 
2703         // Ignore non-positive denominator.
2704         if (!Denominator->isStrictlyPositive())
2705           break;
2706 
2707         // Calculate the incoming numerator bits.
2708         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2709 
2710         // Add floor(log(C)) bits to the numerator bits.
2711         return std::min(TyBits, NumBits + Denominator->logBase2());
2712       }
2713       break;
2714     }
2715 
2716     case Instruction::SRem: {
2717       const APInt *Denominator;
2718       // srem X, C -> we know that the result is within [-C+1,C) when C is a
2719       // positive constant.  This let us put a lower bound on the number of sign
2720       // bits.
2721       if (match(U->getOperand(1), m_APInt(Denominator))) {
2722 
2723         // Ignore non-positive denominator.
2724         if (!Denominator->isStrictlyPositive())
2725           break;
2726 
2727         // Calculate the incoming numerator bits. SRem by a positive constant
2728         // can't lower the number of sign bits.
2729         unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2730 
2731         // Calculate the leading sign bit constraints by examining the
2732         // denominator.  Given that the denominator is positive, there are two
2733         // cases:
2734         //
2735         //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
2736         //     (1 << ceilLogBase2(C)).
2737         //
2738         //  2. the numerator is negative. Then the result range is (-C,0] and
2739         //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2740         //
2741         // Thus a lower bound on the number of sign bits is `TyBits -
2742         // ceilLogBase2(C)`.
2743 
2744         unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2745         return std::max(NumrBits, ResBits);
2746       }
2747       break;
2748     }
2749 
2750     case Instruction::AShr: {
2751       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2752       // ashr X, C   -> adds C sign bits.  Vectors too.
2753       const APInt *ShAmt;
2754       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2755         if (ShAmt->uge(TyBits))
2756           break; // Bad shift.
2757         unsigned ShAmtLimited = ShAmt->getZExtValue();
2758         Tmp += ShAmtLimited;
2759         if (Tmp > TyBits) Tmp = TyBits;
2760       }
2761       return Tmp;
2762     }
2763     case Instruction::Shl: {
2764       const APInt *ShAmt;
2765       if (match(U->getOperand(1), m_APInt(ShAmt))) {
2766         // shl destroys sign bits.
2767         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2768         if (ShAmt->uge(TyBits) ||   // Bad shift.
2769             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2770         Tmp2 = ShAmt->getZExtValue();
2771         return Tmp - Tmp2;
2772       }
2773       break;
2774     }
2775     case Instruction::And:
2776     case Instruction::Or:
2777     case Instruction::Xor: // NOT is handled here.
2778       // Logical binary ops preserve the number of sign bits at the worst.
2779       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2780       if (Tmp != 1) {
2781         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2782         FirstAnswer = std::min(Tmp, Tmp2);
2783         // We computed what we know about the sign bits as our first
2784         // answer. Now proceed to the generic code that uses
2785         // computeKnownBits, and pick whichever answer is better.
2786       }
2787       break;
2788 
2789     case Instruction::Select: {
2790       // If we have a clamp pattern, we know that the number of sign bits will
2791       // be the minimum of the clamp min/max range.
2792       const Value *X;
2793       const APInt *CLow, *CHigh;
2794       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2795         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2796 
2797       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2798       if (Tmp == 1) break;
2799       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2800       return std::min(Tmp, Tmp2);
2801     }
2802 
2803     case Instruction::Add:
2804       // Add can have at most one carry bit.  Thus we know that the output
2805       // is, at worst, one more bit than the inputs.
2806       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2807       if (Tmp == 1) break;
2808 
2809       // Special case decrementing a value (ADD X, -1):
2810       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2811         if (CRHS->isAllOnesValue()) {
2812           KnownBits Known(TyBits);
2813           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2814 
2815           // If the input is known to be 0 or 1, the output is 0/-1, which is
2816           // all sign bits set.
2817           if ((Known.Zero | 1).isAllOnesValue())
2818             return TyBits;
2819 
2820           // If we are subtracting one from a positive number, there is no carry
2821           // out of the result.
2822           if (Known.isNonNegative())
2823             return Tmp;
2824         }
2825 
2826       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2827       if (Tmp2 == 1) break;
2828       return std::min(Tmp, Tmp2) - 1;
2829 
2830     case Instruction::Sub:
2831       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2832       if (Tmp2 == 1) break;
2833 
2834       // Handle NEG.
2835       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2836         if (CLHS->isNullValue()) {
2837           KnownBits Known(TyBits);
2838           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2839           // If the input is known to be 0 or 1, the output is 0/-1, which is
2840           // all sign bits set.
2841           if ((Known.Zero | 1).isAllOnesValue())
2842             return TyBits;
2843 
2844           // If the input is known to be positive (the sign bit is known clear),
2845           // the output of the NEG has the same number of sign bits as the
2846           // input.
2847           if (Known.isNonNegative())
2848             return Tmp2;
2849 
2850           // Otherwise, we treat this like a SUB.
2851         }
2852 
2853       // Sub can have at most one carry bit.  Thus we know that the output
2854       // is, at worst, one more bit than the inputs.
2855       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2856       if (Tmp == 1) break;
2857       return std::min(Tmp, Tmp2) - 1;
2858 
2859     case Instruction::Mul: {
2860       // The output of the Mul can be at most twice the valid bits in the
2861       // inputs.
2862       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2863       if (SignBitsOp0 == 1) break;
2864       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2865       if (SignBitsOp1 == 1) break;
2866       unsigned OutValidBits =
2867           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2868       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2869     }
2870 
2871     case Instruction::PHI: {
2872       const PHINode *PN = cast<PHINode>(U);
2873       unsigned NumIncomingValues = PN->getNumIncomingValues();
2874       // Don't analyze large in-degree PHIs.
2875       if (NumIncomingValues > 4) break;
2876       // Unreachable blocks may have zero-operand PHI nodes.
2877       if (NumIncomingValues == 0) break;
2878 
2879       // Take the minimum of all incoming values.  This can't infinitely loop
2880       // because of our depth threshold.
2881       Query RecQ = Q;
2882       Tmp = TyBits;
2883       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
2884         if (Tmp == 1) return Tmp;
2885         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
2886         Tmp = std::min(
2887             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
2888       }
2889       return Tmp;
2890     }
2891 
2892     case Instruction::Trunc:
2893       // FIXME: it's tricky to do anything useful for this, but it is an
2894       // important case for targets like X86.
2895       break;
2896 
2897     case Instruction::ExtractElement:
2898       // Look through extract element. At the moment we keep this simple and
2899       // skip tracking the specific element. But at least we might find
2900       // information valid for all elements of the vector (for example if vector
2901       // is sign extended, shifted, etc).
2902       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2903 
2904     case Instruction::ShuffleVector: {
2905       // Collect the minimum number of sign bits that are shared by every vector
2906       // element referenced by the shuffle.
2907       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
2908       if (!Shuf) {
2909         // FIXME: Add support for shufflevector constant expressions.
2910         return 1;
2911       }
2912       APInt DemandedLHS, DemandedRHS;
2913       // For undef elements, we don't know anything about the common state of
2914       // the shuffle result.
2915       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
2916         return 1;
2917       Tmp = std::numeric_limits<unsigned>::max();
2918       if (!!DemandedLHS) {
2919         const Value *LHS = Shuf->getOperand(0);
2920         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
2921       }
2922       // If we don't know anything, early out and try computeKnownBits
2923       // fall-back.
2924       if (Tmp == 1)
2925         break;
2926       if (!!DemandedRHS) {
2927         const Value *RHS = Shuf->getOperand(1);
2928         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
2929         Tmp = std::min(Tmp, Tmp2);
2930       }
2931       // If we don't know anything, early out and try computeKnownBits
2932       // fall-back.
2933       if (Tmp == 1)
2934         break;
2935       assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
2936       return Tmp;
2937     }
2938     case Instruction::Call: {
2939       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
2940         switch (II->getIntrinsicID()) {
2941         default: break;
2942         case Intrinsic::abs:
2943           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2944           if (Tmp == 1) break;
2945 
2946           // Absolute value reduces number of sign bits by at most 1.
2947           return Tmp - 1;
2948         }
2949       }
2950     }
2951     }
2952   }
2953 
2954   // Finally, if we can prove that the top bits of the result are 0's or 1's,
2955   // use this information.
2956 
2957   // If we can examine all elements of a vector constant successfully, we're
2958   // done (we can't do any better than that). If not, keep trying.
2959   if (unsigned VecSignBits =
2960           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
2961     return VecSignBits;
2962 
2963   KnownBits Known(TyBits);
2964   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2965 
2966   // If we know that the sign bit is either zero or one, determine the number of
2967   // identical bits in the top of the input value.
2968   return std::max(FirstAnswer, Known.countMinSignBits());
2969 }
2970 
2971 /// This function computes the integer multiple of Base that equals V.
2972 /// If successful, it returns true and returns the multiple in
2973 /// Multiple. If unsuccessful, it returns false. It looks
2974 /// through SExt instructions only if LookThroughSExt is true.
2975 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2976                            bool LookThroughSExt, unsigned Depth) {
2977   assert(V && "No Value?");
2978   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2979   assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2980 
2981   Type *T = V->getType();
2982 
2983   ConstantInt *CI = dyn_cast<ConstantInt>(V);
2984 
2985   if (Base == 0)
2986     return false;
2987 
2988   if (Base == 1) {
2989     Multiple = V;
2990     return true;
2991   }
2992 
2993   ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2994   Constant *BaseVal = ConstantInt::get(T, Base);
2995   if (CO && CO == BaseVal) {
2996     // Multiple is 1.
2997     Multiple = ConstantInt::get(T, 1);
2998     return true;
2999   }
3000 
3001   if (CI && CI->getZExtValue() % Base == 0) {
3002     Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3003     return true;
3004   }
3005 
3006   if (Depth == MaxAnalysisRecursionDepth) return false;
3007 
3008   Operator *I = dyn_cast<Operator>(V);
3009   if (!I) return false;
3010 
3011   switch (I->getOpcode()) {
3012   default: break;
3013   case Instruction::SExt:
3014     if (!LookThroughSExt) return false;
3015     // otherwise fall through to ZExt
3016     LLVM_FALLTHROUGH;
3017   case Instruction::ZExt:
3018     return ComputeMultiple(I->getOperand(0), Base, Multiple,
3019                            LookThroughSExt, Depth+1);
3020   case Instruction::Shl:
3021   case Instruction::Mul: {
3022     Value *Op0 = I->getOperand(0);
3023     Value *Op1 = I->getOperand(1);
3024 
3025     if (I->getOpcode() == Instruction::Shl) {
3026       ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
3027       if (!Op1CI) return false;
3028       // Turn Op0 << Op1 into Op0 * 2^Op1
3029       APInt Op1Int = Op1CI->getValue();
3030       uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
3031       APInt API(Op1Int.getBitWidth(), 0);
3032       API.setBit(BitToSet);
3033       Op1 = ConstantInt::get(V->getContext(), API);
3034     }
3035 
3036     Value *Mul0 = nullptr;
3037     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
3038       if (Constant *Op1C = dyn_cast<Constant>(Op1))
3039         if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
3040           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3041               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3042             Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
3043           if (Op1C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3044               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3045             MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
3046 
3047           // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
3048           Multiple = ConstantExpr::getMul(MulC, Op1C);
3049           return true;
3050         }
3051 
3052       if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
3053         if (Mul0CI->getValue() == 1) {
3054           // V == Base * Op1, so return Op1
3055           Multiple = Op1;
3056           return true;
3057         }
3058     }
3059 
3060     Value *Mul1 = nullptr;
3061     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
3062       if (Constant *Op0C = dyn_cast<Constant>(Op0))
3063         if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
3064           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() <
3065               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3066             Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
3067           if (Op0C->getType()->getPrimitiveSizeInBits().getFixedSize() >
3068               MulC->getType()->getPrimitiveSizeInBits().getFixedSize())
3069             MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
3070 
3071           // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
3072           Multiple = ConstantExpr::getMul(MulC, Op0C);
3073           return true;
3074         }
3075 
3076       if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
3077         if (Mul1CI->getValue() == 1) {
3078           // V == Base * Op0, so return Op0
3079           Multiple = Op0;
3080           return true;
3081         }
3082     }
3083   }
3084   }
3085 
3086   // We could not determine if V is a multiple of Base.
3087   return false;
3088 }
3089 
3090 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3091                                             const TargetLibraryInfo *TLI) {
3092   const Function *F = CB.getCalledFunction();
3093   if (!F)
3094     return Intrinsic::not_intrinsic;
3095 
3096   if (F->isIntrinsic())
3097     return F->getIntrinsicID();
3098 
3099   // We are going to infer semantics of a library function based on mapping it
3100   // to an LLVM intrinsic. Check that the library function is available from
3101   // this callbase and in this environment.
3102   LibFunc Func;
3103   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3104       !CB.onlyReadsMemory())
3105     return Intrinsic::not_intrinsic;
3106 
3107   switch (Func) {
3108   default:
3109     break;
3110   case LibFunc_sin:
3111   case LibFunc_sinf:
3112   case LibFunc_sinl:
3113     return Intrinsic::sin;
3114   case LibFunc_cos:
3115   case LibFunc_cosf:
3116   case LibFunc_cosl:
3117     return Intrinsic::cos;
3118   case LibFunc_exp:
3119   case LibFunc_expf:
3120   case LibFunc_expl:
3121     return Intrinsic::exp;
3122   case LibFunc_exp2:
3123   case LibFunc_exp2f:
3124   case LibFunc_exp2l:
3125     return Intrinsic::exp2;
3126   case LibFunc_log:
3127   case LibFunc_logf:
3128   case LibFunc_logl:
3129     return Intrinsic::log;
3130   case LibFunc_log10:
3131   case LibFunc_log10f:
3132   case LibFunc_log10l:
3133     return Intrinsic::log10;
3134   case LibFunc_log2:
3135   case LibFunc_log2f:
3136   case LibFunc_log2l:
3137     return Intrinsic::log2;
3138   case LibFunc_fabs:
3139   case LibFunc_fabsf:
3140   case LibFunc_fabsl:
3141     return Intrinsic::fabs;
3142   case LibFunc_fmin:
3143   case LibFunc_fminf:
3144   case LibFunc_fminl:
3145     return Intrinsic::minnum;
3146   case LibFunc_fmax:
3147   case LibFunc_fmaxf:
3148   case LibFunc_fmaxl:
3149     return Intrinsic::maxnum;
3150   case LibFunc_copysign:
3151   case LibFunc_copysignf:
3152   case LibFunc_copysignl:
3153     return Intrinsic::copysign;
3154   case LibFunc_floor:
3155   case LibFunc_floorf:
3156   case LibFunc_floorl:
3157     return Intrinsic::floor;
3158   case LibFunc_ceil:
3159   case LibFunc_ceilf:
3160   case LibFunc_ceill:
3161     return Intrinsic::ceil;
3162   case LibFunc_trunc:
3163   case LibFunc_truncf:
3164   case LibFunc_truncl:
3165     return Intrinsic::trunc;
3166   case LibFunc_rint:
3167   case LibFunc_rintf:
3168   case LibFunc_rintl:
3169     return Intrinsic::rint;
3170   case LibFunc_nearbyint:
3171   case LibFunc_nearbyintf:
3172   case LibFunc_nearbyintl:
3173     return Intrinsic::nearbyint;
3174   case LibFunc_round:
3175   case LibFunc_roundf:
3176   case LibFunc_roundl:
3177     return Intrinsic::round;
3178   case LibFunc_roundeven:
3179   case LibFunc_roundevenf:
3180   case LibFunc_roundevenl:
3181     return Intrinsic::roundeven;
3182   case LibFunc_pow:
3183   case LibFunc_powf:
3184   case LibFunc_powl:
3185     return Intrinsic::pow;
3186   case LibFunc_sqrt:
3187   case LibFunc_sqrtf:
3188   case LibFunc_sqrtl:
3189     return Intrinsic::sqrt;
3190   }
3191 
3192   return Intrinsic::not_intrinsic;
3193 }
3194 
3195 /// Return true if we can prove that the specified FP value is never equal to
3196 /// -0.0.
3197 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3198 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3199 ///       the same as +0.0 in floating-point ops.
3200 ///
3201 /// NOTE: this function will need to be revisited when we support non-default
3202 /// rounding modes!
3203 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3204                                 unsigned Depth) {
3205   if (auto *CFP = dyn_cast<ConstantFP>(V))
3206     return !CFP->getValueAPF().isNegZero();
3207 
3208   if (Depth == MaxAnalysisRecursionDepth)
3209     return false;
3210 
3211   auto *Op = dyn_cast<Operator>(V);
3212   if (!Op)
3213     return false;
3214 
3215   // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3216   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3217     return true;
3218 
3219   // sitofp and uitofp turn into +0.0 for zero.
3220   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3221     return true;
3222 
3223   if (auto *Call = dyn_cast<CallInst>(Op)) {
3224     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3225     switch (IID) {
3226     default:
3227       break;
3228     // sqrt(-0.0) = -0.0, no other negative results are possible.
3229     case Intrinsic::sqrt:
3230     case Intrinsic::canonicalize:
3231       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3232     // fabs(x) != -0.0
3233     case Intrinsic::fabs:
3234       return true;
3235     }
3236   }
3237 
3238   return false;
3239 }
3240 
3241 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3242 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3243 /// bit despite comparing equal.
3244 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3245                                             const TargetLibraryInfo *TLI,
3246                                             bool SignBitOnly,
3247                                             unsigned Depth) {
3248   // TODO: This function does not do the right thing when SignBitOnly is true
3249   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3250   // which flips the sign bits of NaNs.  See
3251   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3252 
3253   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3254     return !CFP->getValueAPF().isNegative() ||
3255            (!SignBitOnly && CFP->getValueAPF().isZero());
3256   }
3257 
3258   // Handle vector of constants.
3259   if (auto *CV = dyn_cast<Constant>(V)) {
3260     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3261       unsigned NumElts = CVFVTy->getNumElements();
3262       for (unsigned i = 0; i != NumElts; ++i) {
3263         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3264         if (!CFP)
3265           return false;
3266         if (CFP->getValueAPF().isNegative() &&
3267             (SignBitOnly || !CFP->getValueAPF().isZero()))
3268           return false;
3269       }
3270 
3271       // All non-negative ConstantFPs.
3272       return true;
3273     }
3274   }
3275 
3276   if (Depth == MaxAnalysisRecursionDepth)
3277     return false;
3278 
3279   const Operator *I = dyn_cast<Operator>(V);
3280   if (!I)
3281     return false;
3282 
3283   switch (I->getOpcode()) {
3284   default:
3285     break;
3286   // Unsigned integers are always nonnegative.
3287   case Instruction::UIToFP:
3288     return true;
3289   case Instruction::FMul:
3290   case Instruction::FDiv:
3291     // X * X is always non-negative or a NaN.
3292     // X / X is always exactly 1.0 or a NaN.
3293     if (I->getOperand(0) == I->getOperand(1) &&
3294         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3295       return true;
3296 
3297     LLVM_FALLTHROUGH;
3298   case Instruction::FAdd:
3299   case Instruction::FRem:
3300     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3301                                            Depth + 1) &&
3302            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3303                                            Depth + 1);
3304   case Instruction::Select:
3305     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3306                                            Depth + 1) &&
3307            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3308                                            Depth + 1);
3309   case Instruction::FPExt:
3310   case Instruction::FPTrunc:
3311     // Widening/narrowing never change sign.
3312     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3313                                            Depth + 1);
3314   case Instruction::ExtractElement:
3315     // Look through extract element. At the moment we keep this simple and skip
3316     // tracking the specific element. But at least we might find information
3317     // valid for all elements of the vector.
3318     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3319                                            Depth + 1);
3320   case Instruction::Call:
3321     const auto *CI = cast<CallInst>(I);
3322     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3323     switch (IID) {
3324     default:
3325       break;
3326     case Intrinsic::maxnum: {
3327       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3328       auto isPositiveNum = [&](Value *V) {
3329         if (SignBitOnly) {
3330           // With SignBitOnly, this is tricky because the result of
3331           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3332           // a constant strictly greater than 0.0.
3333           const APFloat *C;
3334           return match(V, m_APFloat(C)) &&
3335                  *C > APFloat::getZero(C->getSemantics());
3336         }
3337 
3338         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3339         // maxnum can't be ordered-less-than-zero.
3340         return isKnownNeverNaN(V, TLI) &&
3341                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3342       };
3343 
3344       // TODO: This could be improved. We could also check that neither operand
3345       //       has its sign bit set (and at least 1 is not-NAN?).
3346       return isPositiveNum(V0) || isPositiveNum(V1);
3347     }
3348 
3349     case Intrinsic::maximum:
3350       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3351                                              Depth + 1) ||
3352              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3353                                              Depth + 1);
3354     case Intrinsic::minnum:
3355     case Intrinsic::minimum:
3356       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3357                                              Depth + 1) &&
3358              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3359                                              Depth + 1);
3360     case Intrinsic::exp:
3361     case Intrinsic::exp2:
3362     case Intrinsic::fabs:
3363       return true;
3364 
3365     case Intrinsic::sqrt:
3366       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3367       if (!SignBitOnly)
3368         return true;
3369       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3370                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3371 
3372     case Intrinsic::powi:
3373       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3374         // powi(x,n) is non-negative if n is even.
3375         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3376           return true;
3377       }
3378       // TODO: This is not correct.  Given that exp is an integer, here are the
3379       // ways that pow can return a negative value:
3380       //
3381       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3382       //   pow(-0, exp)   --> -inf if exp is negative odd.
3383       //   pow(-0, exp)   --> -0 if exp is positive odd.
3384       //   pow(-inf, exp) --> -0 if exp is negative odd.
3385       //   pow(-inf, exp) --> -inf if exp is positive odd.
3386       //
3387       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3388       // but we must return false if x == -0.  Unfortunately we do not currently
3389       // have a way of expressing this constraint.  See details in
3390       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3391       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3392                                              Depth + 1);
3393 
3394     case Intrinsic::fma:
3395     case Intrinsic::fmuladd:
3396       // x*x+y is non-negative if y is non-negative.
3397       return I->getOperand(0) == I->getOperand(1) &&
3398              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3399              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3400                                              Depth + 1);
3401     }
3402     break;
3403   }
3404   return false;
3405 }
3406 
3407 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3408                                        const TargetLibraryInfo *TLI) {
3409   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3410 }
3411 
3412 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3413   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3414 }
3415 
3416 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3417                                 unsigned Depth) {
3418   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3419 
3420   // If we're told that infinities won't happen, assume they won't.
3421   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3422     if (FPMathOp->hasNoInfs())
3423       return true;
3424 
3425   // Handle scalar constants.
3426   if (auto *CFP = dyn_cast<ConstantFP>(V))
3427     return !CFP->isInfinity();
3428 
3429   if (Depth == MaxAnalysisRecursionDepth)
3430     return false;
3431 
3432   if (auto *Inst = dyn_cast<Instruction>(V)) {
3433     switch (Inst->getOpcode()) {
3434     case Instruction::Select: {
3435       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3436              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3437     }
3438     case Instruction::SIToFP:
3439     case Instruction::UIToFP: {
3440       // Get width of largest magnitude integer (remove a bit if signed).
3441       // This still works for a signed minimum value because the largest FP
3442       // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3443       int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3444       if (Inst->getOpcode() == Instruction::SIToFP)
3445         --IntSize;
3446 
3447       // If the exponent of the largest finite FP value can hold the largest
3448       // integer, the result of the cast must be finite.
3449       Type *FPTy = Inst->getType()->getScalarType();
3450       return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3451     }
3452     default:
3453       break;
3454     }
3455   }
3456 
3457   // try to handle fixed width vector constants
3458   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3459   if (VFVTy && isa<Constant>(V)) {
3460     // For vectors, verify that each element is not infinity.
3461     unsigned NumElts = VFVTy->getNumElements();
3462     for (unsigned i = 0; i != NumElts; ++i) {
3463       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3464       if (!Elt)
3465         return false;
3466       if (isa<UndefValue>(Elt))
3467         continue;
3468       auto *CElt = dyn_cast<ConstantFP>(Elt);
3469       if (!CElt || CElt->isInfinity())
3470         return false;
3471     }
3472     // All elements were confirmed non-infinity or undefined.
3473     return true;
3474   }
3475 
3476   // was not able to prove that V never contains infinity
3477   return false;
3478 }
3479 
3480 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3481                            unsigned Depth) {
3482   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3483 
3484   // If we're told that NaNs won't happen, assume they won't.
3485   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3486     if (FPMathOp->hasNoNaNs())
3487       return true;
3488 
3489   // Handle scalar constants.
3490   if (auto *CFP = dyn_cast<ConstantFP>(V))
3491     return !CFP->isNaN();
3492 
3493   if (Depth == MaxAnalysisRecursionDepth)
3494     return false;
3495 
3496   if (auto *Inst = dyn_cast<Instruction>(V)) {
3497     switch (Inst->getOpcode()) {
3498     case Instruction::FAdd:
3499     case Instruction::FSub:
3500       // Adding positive and negative infinity produces NaN.
3501       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3502              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3503              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3504               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3505 
3506     case Instruction::FMul:
3507       // Zero multiplied with infinity produces NaN.
3508       // FIXME: If neither side can be zero fmul never produces NaN.
3509       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3510              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3511              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3512              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3513 
3514     case Instruction::FDiv:
3515     case Instruction::FRem:
3516       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3517       return false;
3518 
3519     case Instruction::Select: {
3520       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3521              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3522     }
3523     case Instruction::SIToFP:
3524     case Instruction::UIToFP:
3525       return true;
3526     case Instruction::FPTrunc:
3527     case Instruction::FPExt:
3528       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3529     default:
3530       break;
3531     }
3532   }
3533 
3534   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3535     switch (II->getIntrinsicID()) {
3536     case Intrinsic::canonicalize:
3537     case Intrinsic::fabs:
3538     case Intrinsic::copysign:
3539     case Intrinsic::exp:
3540     case Intrinsic::exp2:
3541     case Intrinsic::floor:
3542     case Intrinsic::ceil:
3543     case Intrinsic::trunc:
3544     case Intrinsic::rint:
3545     case Intrinsic::nearbyint:
3546     case Intrinsic::round:
3547     case Intrinsic::roundeven:
3548       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3549     case Intrinsic::sqrt:
3550       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3551              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3552     case Intrinsic::minnum:
3553     case Intrinsic::maxnum:
3554       // If either operand is not NaN, the result is not NaN.
3555       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3556              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3557     default:
3558       return false;
3559     }
3560   }
3561 
3562   // Try to handle fixed width vector constants
3563   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3564   if (VFVTy && isa<Constant>(V)) {
3565     // For vectors, verify that each element is not NaN.
3566     unsigned NumElts = VFVTy->getNumElements();
3567     for (unsigned i = 0; i != NumElts; ++i) {
3568       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3569       if (!Elt)
3570         return false;
3571       if (isa<UndefValue>(Elt))
3572         continue;
3573       auto *CElt = dyn_cast<ConstantFP>(Elt);
3574       if (!CElt || CElt->isNaN())
3575         return false;
3576     }
3577     // All elements were confirmed not-NaN or undefined.
3578     return true;
3579   }
3580 
3581   // Was not able to prove that V never contains NaN
3582   return false;
3583 }
3584 
3585 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3586 
3587   // All byte-wide stores are splatable, even of arbitrary variables.
3588   if (V->getType()->isIntegerTy(8))
3589     return V;
3590 
3591   LLVMContext &Ctx = V->getContext();
3592 
3593   // Undef don't care.
3594   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3595   if (isa<UndefValue>(V))
3596     return UndefInt8;
3597 
3598   // Return Undef for zero-sized type.
3599   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3600     return UndefInt8;
3601 
3602   Constant *C = dyn_cast<Constant>(V);
3603   if (!C) {
3604     // Conceptually, we could handle things like:
3605     //   %a = zext i8 %X to i16
3606     //   %b = shl i16 %a, 8
3607     //   %c = or i16 %a, %b
3608     // but until there is an example that actually needs this, it doesn't seem
3609     // worth worrying about.
3610     return nullptr;
3611   }
3612 
3613   // Handle 'null' ConstantArrayZero etc.
3614   if (C->isNullValue())
3615     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3616 
3617   // Constant floating-point values can be handled as integer values if the
3618   // corresponding integer value is "byteable".  An important case is 0.0.
3619   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3620     Type *Ty = nullptr;
3621     if (CFP->getType()->isHalfTy())
3622       Ty = Type::getInt16Ty(Ctx);
3623     else if (CFP->getType()->isFloatTy())
3624       Ty = Type::getInt32Ty(Ctx);
3625     else if (CFP->getType()->isDoubleTy())
3626       Ty = Type::getInt64Ty(Ctx);
3627     // Don't handle long double formats, which have strange constraints.
3628     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3629               : nullptr;
3630   }
3631 
3632   // We can handle constant integers that are multiple of 8 bits.
3633   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3634     if (CI->getBitWidth() % 8 == 0) {
3635       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3636       if (!CI->getValue().isSplat(8))
3637         return nullptr;
3638       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3639     }
3640   }
3641 
3642   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3643     if (CE->getOpcode() == Instruction::IntToPtr) {
3644       if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3645         unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3646         return isBytewiseValue(
3647             ConstantExpr::getIntegerCast(CE->getOperand(0),
3648                                          Type::getIntNTy(Ctx, BitWidth), false),
3649             DL);
3650       }
3651     }
3652   }
3653 
3654   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3655     if (LHS == RHS)
3656       return LHS;
3657     if (!LHS || !RHS)
3658       return nullptr;
3659     if (LHS == UndefInt8)
3660       return RHS;
3661     if (RHS == UndefInt8)
3662       return LHS;
3663     return nullptr;
3664   };
3665 
3666   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3667     Value *Val = UndefInt8;
3668     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3669       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3670         return nullptr;
3671     return Val;
3672   }
3673 
3674   if (isa<ConstantAggregate>(C)) {
3675     Value *Val = UndefInt8;
3676     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3677       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3678         return nullptr;
3679     return Val;
3680   }
3681 
3682   // Don't try to handle the handful of other constants.
3683   return nullptr;
3684 }
3685 
3686 // This is the recursive version of BuildSubAggregate. It takes a few different
3687 // arguments. Idxs is the index within the nested struct From that we are
3688 // looking at now (which is of type IndexedType). IdxSkip is the number of
3689 // indices from Idxs that should be left out when inserting into the resulting
3690 // struct. To is the result struct built so far, new insertvalue instructions
3691 // build on that.
3692 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3693                                 SmallVectorImpl<unsigned> &Idxs,
3694                                 unsigned IdxSkip,
3695                                 Instruction *InsertBefore) {
3696   StructType *STy = dyn_cast<StructType>(IndexedType);
3697   if (STy) {
3698     // Save the original To argument so we can modify it
3699     Value *OrigTo = To;
3700     // General case, the type indexed by Idxs is a struct
3701     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3702       // Process each struct element recursively
3703       Idxs.push_back(i);
3704       Value *PrevTo = To;
3705       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3706                              InsertBefore);
3707       Idxs.pop_back();
3708       if (!To) {
3709         // Couldn't find any inserted value for this index? Cleanup
3710         while (PrevTo != OrigTo) {
3711           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3712           PrevTo = Del->getAggregateOperand();
3713           Del->eraseFromParent();
3714         }
3715         // Stop processing elements
3716         break;
3717       }
3718     }
3719     // If we successfully found a value for each of our subaggregates
3720     if (To)
3721       return To;
3722   }
3723   // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3724   // the struct's elements had a value that was inserted directly. In the latter
3725   // case, perhaps we can't determine each of the subelements individually, but
3726   // we might be able to find the complete struct somewhere.
3727 
3728   // Find the value that is at that particular spot
3729   Value *V = FindInsertedValue(From, Idxs);
3730 
3731   if (!V)
3732     return nullptr;
3733 
3734   // Insert the value in the new (sub) aggregate
3735   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3736                                  "tmp", InsertBefore);
3737 }
3738 
3739 // This helper takes a nested struct and extracts a part of it (which is again a
3740 // struct) into a new value. For example, given the struct:
3741 // { a, { b, { c, d }, e } }
3742 // and the indices "1, 1" this returns
3743 // { c, d }.
3744 //
3745 // It does this by inserting an insertvalue for each element in the resulting
3746 // struct, as opposed to just inserting a single struct. This will only work if
3747 // each of the elements of the substruct are known (ie, inserted into From by an
3748 // insertvalue instruction somewhere).
3749 //
3750 // All inserted insertvalue instructions are inserted before InsertBefore
3751 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3752                                 Instruction *InsertBefore) {
3753   assert(InsertBefore && "Must have someplace to insert!");
3754   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3755                                                              idx_range);
3756   Value *To = UndefValue::get(IndexedType);
3757   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3758   unsigned IdxSkip = Idxs.size();
3759 
3760   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3761 }
3762 
3763 /// Given an aggregate and a sequence of indices, see if the scalar value
3764 /// indexed is already around as a register, for example if it was inserted
3765 /// directly into the aggregate.
3766 ///
3767 /// If InsertBefore is not null, this function will duplicate (modified)
3768 /// insertvalues when a part of a nested struct is extracted.
3769 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3770                                Instruction *InsertBefore) {
3771   // Nothing to index? Just return V then (this is useful at the end of our
3772   // recursion).
3773   if (idx_range.empty())
3774     return V;
3775   // We have indices, so V should have an indexable type.
3776   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3777          "Not looking at a struct or array?");
3778   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3779          "Invalid indices for type?");
3780 
3781   if (Constant *C = dyn_cast<Constant>(V)) {
3782     C = C->getAggregateElement(idx_range[0]);
3783     if (!C) return nullptr;
3784     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3785   }
3786 
3787   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3788     // Loop the indices for the insertvalue instruction in parallel with the
3789     // requested indices
3790     const unsigned *req_idx = idx_range.begin();
3791     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3792          i != e; ++i, ++req_idx) {
3793       if (req_idx == idx_range.end()) {
3794         // We can't handle this without inserting insertvalues
3795         if (!InsertBefore)
3796           return nullptr;
3797 
3798         // The requested index identifies a part of a nested aggregate. Handle
3799         // this specially. For example,
3800         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3801         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3802         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3803         // This can be changed into
3804         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3805         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3806         // which allows the unused 0,0 element from the nested struct to be
3807         // removed.
3808         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3809                                  InsertBefore);
3810       }
3811 
3812       // This insert value inserts something else than what we are looking for.
3813       // See if the (aggregate) value inserted into has the value we are
3814       // looking for, then.
3815       if (*req_idx != *i)
3816         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3817                                  InsertBefore);
3818     }
3819     // If we end up here, the indices of the insertvalue match with those
3820     // requested (though possibly only partially). Now we recursively look at
3821     // the inserted value, passing any remaining indices.
3822     return FindInsertedValue(I->getInsertedValueOperand(),
3823                              makeArrayRef(req_idx, idx_range.end()),
3824                              InsertBefore);
3825   }
3826 
3827   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3828     // If we're extracting a value from an aggregate that was extracted from
3829     // something else, we can extract from that something else directly instead.
3830     // However, we will need to chain I's indices with the requested indices.
3831 
3832     // Calculate the number of indices required
3833     unsigned size = I->getNumIndices() + idx_range.size();
3834     // Allocate some space to put the new indices in
3835     SmallVector<unsigned, 5> Idxs;
3836     Idxs.reserve(size);
3837     // Add indices from the extract value instruction
3838     Idxs.append(I->idx_begin(), I->idx_end());
3839 
3840     // Add requested indices
3841     Idxs.append(idx_range.begin(), idx_range.end());
3842 
3843     assert(Idxs.size() == size
3844            && "Number of indices added not correct?");
3845 
3846     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3847   }
3848   // Otherwise, we don't know (such as, extracting from a function return value
3849   // or load instruction)
3850   return nullptr;
3851 }
3852 
3853 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3854                                        unsigned CharSize) {
3855   // Make sure the GEP has exactly three arguments.
3856   if (GEP->getNumOperands() != 3)
3857     return false;
3858 
3859   // Make sure the index-ee is a pointer to array of \p CharSize integers.
3860   // CharSize.
3861   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3862   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3863     return false;
3864 
3865   // Check to make sure that the first operand of the GEP is an integer and
3866   // has value 0 so that we are sure we're indexing into the initializer.
3867   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3868   if (!FirstIdx || !FirstIdx->isZero())
3869     return false;
3870 
3871   return true;
3872 }
3873 
3874 bool llvm::getConstantDataArrayInfo(const Value *V,
3875                                     ConstantDataArraySlice &Slice,
3876                                     unsigned ElementSize, uint64_t Offset) {
3877   assert(V);
3878 
3879   // Look through bitcast instructions and geps.
3880   V = V->stripPointerCasts();
3881 
3882   // If the value is a GEP instruction or constant expression, treat it as an
3883   // offset.
3884   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3885     // The GEP operator should be based on a pointer to string constant, and is
3886     // indexing into the string constant.
3887     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3888       return false;
3889 
3890     // If the second index isn't a ConstantInt, then this is a variable index
3891     // into the array.  If this occurs, we can't say anything meaningful about
3892     // the string.
3893     uint64_t StartIdx = 0;
3894     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3895       StartIdx = CI->getZExtValue();
3896     else
3897       return false;
3898     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3899                                     StartIdx + Offset);
3900   }
3901 
3902   // The GEP instruction, constant or instruction, must reference a global
3903   // variable that is a constant and is initialized. The referenced constant
3904   // initializer is the array that we'll use for optimization.
3905   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3906   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3907     return false;
3908 
3909   const ConstantDataArray *Array;
3910   ArrayType *ArrayTy;
3911   if (GV->getInitializer()->isNullValue()) {
3912     Type *GVTy = GV->getValueType();
3913     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3914       // A zeroinitializer for the array; there is no ConstantDataArray.
3915       Array = nullptr;
3916     } else {
3917       const DataLayout &DL = GV->getParent()->getDataLayout();
3918       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
3919       uint64_t Length = SizeInBytes / (ElementSize / 8);
3920       if (Length <= Offset)
3921         return false;
3922 
3923       Slice.Array = nullptr;
3924       Slice.Offset = 0;
3925       Slice.Length = Length - Offset;
3926       return true;
3927     }
3928   } else {
3929     // This must be a ConstantDataArray.
3930     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3931     if (!Array)
3932       return false;
3933     ArrayTy = Array->getType();
3934   }
3935   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3936     return false;
3937 
3938   uint64_t NumElts = ArrayTy->getArrayNumElements();
3939   if (Offset > NumElts)
3940     return false;
3941 
3942   Slice.Array = Array;
3943   Slice.Offset = Offset;
3944   Slice.Length = NumElts - Offset;
3945   return true;
3946 }
3947 
3948 /// This function computes the length of a null-terminated C string pointed to
3949 /// by V. If successful, it returns true and returns the string in Str.
3950 /// If unsuccessful, it returns false.
3951 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3952                                  uint64_t Offset, bool TrimAtNul) {
3953   ConstantDataArraySlice Slice;
3954   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3955     return false;
3956 
3957   if (Slice.Array == nullptr) {
3958     if (TrimAtNul) {
3959       Str = StringRef();
3960       return true;
3961     }
3962     if (Slice.Length == 1) {
3963       Str = StringRef("", 1);
3964       return true;
3965     }
3966     // We cannot instantiate a StringRef as we do not have an appropriate string
3967     // of 0s at hand.
3968     return false;
3969   }
3970 
3971   // Start out with the entire array in the StringRef.
3972   Str = Slice.Array->getAsString();
3973   // Skip over 'offset' bytes.
3974   Str = Str.substr(Slice.Offset);
3975 
3976   if (TrimAtNul) {
3977     // Trim off the \0 and anything after it.  If the array is not nul
3978     // terminated, we just return the whole end of string.  The client may know
3979     // some other way that the string is length-bound.
3980     Str = Str.substr(0, Str.find('\0'));
3981   }
3982   return true;
3983 }
3984 
3985 // These next two are very similar to the above, but also look through PHI
3986 // nodes.
3987 // TODO: See if we can integrate these two together.
3988 
3989 /// If we can compute the length of the string pointed to by
3990 /// the specified pointer, return 'len+1'.  If we can't, return 0.
3991 static uint64_t GetStringLengthH(const Value *V,
3992                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3993                                  unsigned CharSize) {
3994   // Look through noop bitcast instructions.
3995   V = V->stripPointerCasts();
3996 
3997   // If this is a PHI node, there are two cases: either we have already seen it
3998   // or we haven't.
3999   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4000     if (!PHIs.insert(PN).second)
4001       return ~0ULL;  // already in the set.
4002 
4003     // If it was new, see if all the input strings are the same length.
4004     uint64_t LenSoFar = ~0ULL;
4005     for (Value *IncValue : PN->incoming_values()) {
4006       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4007       if (Len == 0) return 0; // Unknown length -> unknown.
4008 
4009       if (Len == ~0ULL) continue;
4010 
4011       if (Len != LenSoFar && LenSoFar != ~0ULL)
4012         return 0;    // Disagree -> unknown.
4013       LenSoFar = Len;
4014     }
4015 
4016     // Success, all agree.
4017     return LenSoFar;
4018   }
4019 
4020   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4021   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4022     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4023     if (Len1 == 0) return 0;
4024     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4025     if (Len2 == 0) return 0;
4026     if (Len1 == ~0ULL) return Len2;
4027     if (Len2 == ~0ULL) return Len1;
4028     if (Len1 != Len2) return 0;
4029     return Len1;
4030   }
4031 
4032   // Otherwise, see if we can read the string.
4033   ConstantDataArraySlice Slice;
4034   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4035     return 0;
4036 
4037   if (Slice.Array == nullptr)
4038     return 1;
4039 
4040   // Search for nul characters
4041   unsigned NullIndex = 0;
4042   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4043     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4044       break;
4045   }
4046 
4047   return NullIndex + 1;
4048 }
4049 
4050 /// If we can compute the length of the string pointed to by
4051 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4052 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4053   if (!V->getType()->isPointerTy())
4054     return 0;
4055 
4056   SmallPtrSet<const PHINode*, 32> PHIs;
4057   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4058   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4059   // an empty string as a length.
4060   return Len == ~0ULL ? 1 : Len;
4061 }
4062 
4063 const Value *
4064 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4065                                            bool MustPreserveNullness) {
4066   assert(Call &&
4067          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4068   if (const Value *RV = Call->getReturnedArgOperand())
4069     return RV;
4070   // This can be used only as a aliasing property.
4071   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4072           Call, MustPreserveNullness))
4073     return Call->getArgOperand(0);
4074   return nullptr;
4075 }
4076 
4077 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4078     const CallBase *Call, bool MustPreserveNullness) {
4079   switch (Call->getIntrinsicID()) {
4080   case Intrinsic::launder_invariant_group:
4081   case Intrinsic::strip_invariant_group:
4082   case Intrinsic::aarch64_irg:
4083   case Intrinsic::aarch64_tagp:
4084     return true;
4085   case Intrinsic::ptrmask:
4086     return !MustPreserveNullness;
4087   default:
4088     return false;
4089   }
4090 }
4091 
4092 /// \p PN defines a loop-variant pointer to an object.  Check if the
4093 /// previous iteration of the loop was referring to the same object as \p PN.
4094 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4095                                          const LoopInfo *LI) {
4096   // Find the loop-defined value.
4097   Loop *L = LI->getLoopFor(PN->getParent());
4098   if (PN->getNumIncomingValues() != 2)
4099     return true;
4100 
4101   // Find the value from previous iteration.
4102   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4103   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4104     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4105   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4106     return true;
4107 
4108   // If a new pointer is loaded in the loop, the pointer references a different
4109   // object in every iteration.  E.g.:
4110   //    for (i)
4111   //       int *p = a[i];
4112   //       ...
4113   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4114     if (!L->isLoopInvariant(Load->getPointerOperand()))
4115       return false;
4116   return true;
4117 }
4118 
4119 Value *llvm::getUnderlyingObject(Value *V, unsigned MaxLookup) {
4120   if (!V->getType()->isPointerTy())
4121     return V;
4122   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4123     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
4124       V = GEP->getPointerOperand();
4125     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4126                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4127       V = cast<Operator>(V)->getOperand(0);
4128       if (!V->getType()->isPointerTy())
4129         return V;
4130     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
4131       if (GA->isInterposable())
4132         return V;
4133       V = GA->getAliasee();
4134     } else {
4135       if (auto *PHI = dyn_cast<PHINode>(V)) {
4136         // Look through single-arg phi nodes created by LCSSA.
4137         if (PHI->getNumIncomingValues() == 1) {
4138           V = PHI->getIncomingValue(0);
4139           continue;
4140         }
4141       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4142         // CaptureTracking can know about special capturing properties of some
4143         // intrinsics like launder.invariant.group, that can't be expressed with
4144         // the attributes, but have properties like returning aliasing pointer.
4145         // Because some analysis may assume that nocaptured pointer is not
4146         // returned from some special intrinsic (because function would have to
4147         // be marked with returns attribute), it is crucial to use this function
4148         // because it should be in sync with CaptureTracking. Not using it may
4149         // cause weird miscompilations where 2 aliasing pointers are assumed to
4150         // noalias.
4151         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4152           V = RP;
4153           continue;
4154         }
4155       }
4156 
4157       return V;
4158     }
4159     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4160   }
4161   return V;
4162 }
4163 
4164 void llvm::getUnderlyingObjects(const Value *V,
4165                                 SmallVectorImpl<const Value *> &Objects,
4166                                 LoopInfo *LI, unsigned MaxLookup) {
4167   SmallPtrSet<const Value *, 4> Visited;
4168   SmallVector<const Value *, 4> Worklist;
4169   Worklist.push_back(V);
4170   do {
4171     const Value *P = Worklist.pop_back_val();
4172     P = getUnderlyingObject(P, MaxLookup);
4173 
4174     if (!Visited.insert(P).second)
4175       continue;
4176 
4177     if (auto *SI = dyn_cast<SelectInst>(P)) {
4178       Worklist.push_back(SI->getTrueValue());
4179       Worklist.push_back(SI->getFalseValue());
4180       continue;
4181     }
4182 
4183     if (auto *PN = dyn_cast<PHINode>(P)) {
4184       // If this PHI changes the underlying object in every iteration of the
4185       // loop, don't look through it.  Consider:
4186       //   int **A;
4187       //   for (i) {
4188       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4189       //     Curr = A[i];
4190       //     *Prev, *Curr;
4191       //
4192       // Prev is tracking Curr one iteration behind so they refer to different
4193       // underlying objects.
4194       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4195           isSameUnderlyingObjectInLoop(PN, LI))
4196         for (Value *IncValue : PN->incoming_values())
4197           Worklist.push_back(IncValue);
4198       continue;
4199     }
4200 
4201     Objects.push_back(P);
4202   } while (!Worklist.empty());
4203 }
4204 
4205 /// This is the function that does the work of looking through basic
4206 /// ptrtoint+arithmetic+inttoptr sequences.
4207 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4208   do {
4209     if (const Operator *U = dyn_cast<Operator>(V)) {
4210       // If we find a ptrtoint, we can transfer control back to the
4211       // regular getUnderlyingObjectFromInt.
4212       if (U->getOpcode() == Instruction::PtrToInt)
4213         return U->getOperand(0);
4214       // If we find an add of a constant, a multiplied value, or a phi, it's
4215       // likely that the other operand will lead us to the base
4216       // object. We don't have to worry about the case where the
4217       // object address is somehow being computed by the multiply,
4218       // because our callers only care when the result is an
4219       // identifiable object.
4220       if (U->getOpcode() != Instruction::Add ||
4221           (!isa<ConstantInt>(U->getOperand(1)) &&
4222            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4223            !isa<PHINode>(U->getOperand(1))))
4224         return V;
4225       V = U->getOperand(0);
4226     } else {
4227       return V;
4228     }
4229     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4230   } while (true);
4231 }
4232 
4233 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4234 /// ptrtoint+arithmetic+inttoptr sequences.
4235 /// It returns false if unidentified object is found in getUnderlyingObjects.
4236 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4237                                           SmallVectorImpl<Value *> &Objects) {
4238   SmallPtrSet<const Value *, 16> Visited;
4239   SmallVector<const Value *, 4> Working(1, V);
4240   do {
4241     V = Working.pop_back_val();
4242 
4243     SmallVector<const Value *, 4> Objs;
4244     getUnderlyingObjects(V, Objs);
4245 
4246     for (const Value *V : Objs) {
4247       if (!Visited.insert(V).second)
4248         continue;
4249       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4250         const Value *O =
4251           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4252         if (O->getType()->isPointerTy()) {
4253           Working.push_back(O);
4254           continue;
4255         }
4256       }
4257       // If getUnderlyingObjects fails to find an identifiable object,
4258       // getUnderlyingObjectsForCodeGen also fails for safety.
4259       if (!isIdentifiedObject(V)) {
4260         Objects.clear();
4261         return false;
4262       }
4263       Objects.push_back(const_cast<Value *>(V));
4264     }
4265   } while (!Working.empty());
4266   return true;
4267 }
4268 
4269 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4270   AllocaInst *Result = nullptr;
4271   SmallPtrSet<Value *, 4> Visited;
4272   SmallVector<Value *, 4> Worklist;
4273 
4274   auto AddWork = [&](Value *V) {
4275     if (Visited.insert(V).second)
4276       Worklist.push_back(V);
4277   };
4278 
4279   AddWork(V);
4280   do {
4281     V = Worklist.pop_back_val();
4282     assert(Visited.count(V));
4283 
4284     if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4285       if (Result && Result != AI)
4286         return nullptr;
4287       Result = AI;
4288     } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4289       AddWork(CI->getOperand(0));
4290     } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4291       for (Value *IncValue : PN->incoming_values())
4292         AddWork(IncValue);
4293     } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4294       AddWork(SI->getTrueValue());
4295       AddWork(SI->getFalseValue());
4296     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4297       if (OffsetZero && !GEP->hasAllZeroIndices())
4298         return nullptr;
4299       AddWork(GEP->getPointerOperand());
4300     } else {
4301       return nullptr;
4302     }
4303   } while (!Worklist.empty());
4304 
4305   return Result;
4306 }
4307 
4308 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4309     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4310   for (const User *U : V->users()) {
4311     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4312     if (!II)
4313       return false;
4314 
4315     if (AllowLifetime && II->isLifetimeStartOrEnd())
4316       continue;
4317 
4318     if (AllowDroppable && II->isDroppable())
4319       continue;
4320 
4321     return false;
4322   }
4323   return true;
4324 }
4325 
4326 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4327   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4328       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4329 }
4330 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4331   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4332       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4333 }
4334 
4335 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4336   if (!LI.isUnordered())
4337     return true;
4338   const Function &F = *LI.getFunction();
4339   // Speculative load may create a race that did not exist in the source.
4340   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4341     // Speculative load may load data from dirty regions.
4342     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4343     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4344 }
4345 
4346 
4347 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
4348                                         const Instruction *CtxI,
4349                                         const DominatorTree *DT) {
4350   const Operator *Inst = dyn_cast<Operator>(V);
4351   if (!Inst)
4352     return false;
4353 
4354   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
4355     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
4356       if (C->canTrap())
4357         return false;
4358 
4359   switch (Inst->getOpcode()) {
4360   default:
4361     return true;
4362   case Instruction::UDiv:
4363   case Instruction::URem: {
4364     // x / y is undefined if y == 0.
4365     const APInt *V;
4366     if (match(Inst->getOperand(1), m_APInt(V)))
4367       return *V != 0;
4368     return false;
4369   }
4370   case Instruction::SDiv:
4371   case Instruction::SRem: {
4372     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4373     const APInt *Numerator, *Denominator;
4374     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4375       return false;
4376     // We cannot hoist this division if the denominator is 0.
4377     if (*Denominator == 0)
4378       return false;
4379     // It's safe to hoist if the denominator is not 0 or -1.
4380     if (*Denominator != -1)
4381       return true;
4382     // At this point we know that the denominator is -1.  It is safe to hoist as
4383     // long we know that the numerator is not INT_MIN.
4384     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4385       return !Numerator->isMinSignedValue();
4386     // The numerator *might* be MinSignedValue.
4387     return false;
4388   }
4389   case Instruction::Load: {
4390     const LoadInst *LI = cast<LoadInst>(Inst);
4391     if (mustSuppressSpeculation(*LI))
4392       return false;
4393     const DataLayout &DL = LI->getModule()->getDataLayout();
4394     return isDereferenceableAndAlignedPointer(
4395         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
4396         DL, CtxI, DT);
4397   }
4398   case Instruction::Call: {
4399     auto *CI = cast<const CallInst>(Inst);
4400     const Function *Callee = CI->getCalledFunction();
4401 
4402     // The called function could have undefined behavior or side-effects, even
4403     // if marked readnone nounwind.
4404     return Callee && Callee->isSpeculatable();
4405   }
4406   case Instruction::VAArg:
4407   case Instruction::Alloca:
4408   case Instruction::Invoke:
4409   case Instruction::CallBr:
4410   case Instruction::PHI:
4411   case Instruction::Store:
4412   case Instruction::Ret:
4413   case Instruction::Br:
4414   case Instruction::IndirectBr:
4415   case Instruction::Switch:
4416   case Instruction::Unreachable:
4417   case Instruction::Fence:
4418   case Instruction::AtomicRMW:
4419   case Instruction::AtomicCmpXchg:
4420   case Instruction::LandingPad:
4421   case Instruction::Resume:
4422   case Instruction::CatchSwitch:
4423   case Instruction::CatchPad:
4424   case Instruction::CatchRet:
4425   case Instruction::CleanupPad:
4426   case Instruction::CleanupRet:
4427     return false; // Misc instructions which have effects
4428   }
4429 }
4430 
4431 bool llvm::mayBeMemoryDependent(const Instruction &I) {
4432   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
4433 }
4434 
4435 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4436 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4437   switch (OR) {
4438     case ConstantRange::OverflowResult::MayOverflow:
4439       return OverflowResult::MayOverflow;
4440     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4441       return OverflowResult::AlwaysOverflowsLow;
4442     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4443       return OverflowResult::AlwaysOverflowsHigh;
4444     case ConstantRange::OverflowResult::NeverOverflows:
4445       return OverflowResult::NeverOverflows;
4446   }
4447   llvm_unreachable("Unknown OverflowResult");
4448 }
4449 
4450 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4451 static ConstantRange computeConstantRangeIncludingKnownBits(
4452     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4453     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4454     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4455   KnownBits Known = computeKnownBits(
4456       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4457   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4458   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4459   ConstantRange::PreferredRangeType RangeType =
4460       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4461   return CR1.intersectWith(CR2, RangeType);
4462 }
4463 
4464 OverflowResult llvm::computeOverflowForUnsignedMul(
4465     const Value *LHS, const Value *RHS, const DataLayout &DL,
4466     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4467     bool UseInstrInfo) {
4468   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4469                                         nullptr, UseInstrInfo);
4470   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4471                                         nullptr, UseInstrInfo);
4472   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4473   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4474   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4475 }
4476 
4477 OverflowResult
4478 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4479                                   const DataLayout &DL, AssumptionCache *AC,
4480                                   const Instruction *CxtI,
4481                                   const DominatorTree *DT, bool UseInstrInfo) {
4482   // Multiplying n * m significant bits yields a result of n + m significant
4483   // bits. If the total number of significant bits does not exceed the
4484   // result bit width (minus 1), there is no overflow.
4485   // This means if we have enough leading sign bits in the operands
4486   // we can guarantee that the result does not overflow.
4487   // Ref: "Hacker's Delight" by Henry Warren
4488   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4489 
4490   // Note that underestimating the number of sign bits gives a more
4491   // conservative answer.
4492   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4493                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4494 
4495   // First handle the easy case: if we have enough sign bits there's
4496   // definitely no overflow.
4497   if (SignBits > BitWidth + 1)
4498     return OverflowResult::NeverOverflows;
4499 
4500   // There are two ambiguous cases where there can be no overflow:
4501   //   SignBits == BitWidth + 1    and
4502   //   SignBits == BitWidth
4503   // The second case is difficult to check, therefore we only handle the
4504   // first case.
4505   if (SignBits == BitWidth + 1) {
4506     // It overflows only when both arguments are negative and the true
4507     // product is exactly the minimum negative number.
4508     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4509     // For simplicity we just check if at least one side is not negative.
4510     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4511                                           nullptr, UseInstrInfo);
4512     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4513                                           nullptr, UseInstrInfo);
4514     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4515       return OverflowResult::NeverOverflows;
4516   }
4517   return OverflowResult::MayOverflow;
4518 }
4519 
4520 OverflowResult llvm::computeOverflowForUnsignedAdd(
4521     const Value *LHS, const Value *RHS, const DataLayout &DL,
4522     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4523     bool UseInstrInfo) {
4524   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4525       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4526       nullptr, UseInstrInfo);
4527   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4528       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4529       nullptr, UseInstrInfo);
4530   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4531 }
4532 
4533 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4534                                                   const Value *RHS,
4535                                                   const AddOperator *Add,
4536                                                   const DataLayout &DL,
4537                                                   AssumptionCache *AC,
4538                                                   const Instruction *CxtI,
4539                                                   const DominatorTree *DT) {
4540   if (Add && Add->hasNoSignedWrap()) {
4541     return OverflowResult::NeverOverflows;
4542   }
4543 
4544   // If LHS and RHS each have at least two sign bits, the addition will look
4545   // like
4546   //
4547   // XX..... +
4548   // YY.....
4549   //
4550   // If the carry into the most significant position is 0, X and Y can't both
4551   // be 1 and therefore the carry out of the addition is also 0.
4552   //
4553   // If the carry into the most significant position is 1, X and Y can't both
4554   // be 0 and therefore the carry out of the addition is also 1.
4555   //
4556   // Since the carry into the most significant position is always equal to
4557   // the carry out of the addition, there is no signed overflow.
4558   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4559       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4560     return OverflowResult::NeverOverflows;
4561 
4562   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4563       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4564   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4565       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4566   OverflowResult OR =
4567       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4568   if (OR != OverflowResult::MayOverflow)
4569     return OR;
4570 
4571   // The remaining code needs Add to be available. Early returns if not so.
4572   if (!Add)
4573     return OverflowResult::MayOverflow;
4574 
4575   // If the sign of Add is the same as at least one of the operands, this add
4576   // CANNOT overflow. If this can be determined from the known bits of the
4577   // operands the above signedAddMayOverflow() check will have already done so.
4578   // The only other way to improve on the known bits is from an assumption, so
4579   // call computeKnownBitsFromAssume() directly.
4580   bool LHSOrRHSKnownNonNegative =
4581       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4582   bool LHSOrRHSKnownNegative =
4583       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4584   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4585     KnownBits AddKnown(LHSRange.getBitWidth());
4586     computeKnownBitsFromAssume(
4587         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4588     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4589         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4590       return OverflowResult::NeverOverflows;
4591   }
4592 
4593   return OverflowResult::MayOverflow;
4594 }
4595 
4596 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4597                                                    const Value *RHS,
4598                                                    const DataLayout &DL,
4599                                                    AssumptionCache *AC,
4600                                                    const Instruction *CxtI,
4601                                                    const DominatorTree *DT) {
4602   // Checking for conditions implied by dominating conditions may be expensive.
4603   // Limit it to usub_with_overflow calls for now.
4604   if (match(CxtI,
4605             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
4606     if (auto C =
4607             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
4608       if (*C)
4609         return OverflowResult::NeverOverflows;
4610       return OverflowResult::AlwaysOverflowsLow;
4611     }
4612   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4613       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4614   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4615       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4616   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4617 }
4618 
4619 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4620                                                  const Value *RHS,
4621                                                  const DataLayout &DL,
4622                                                  AssumptionCache *AC,
4623                                                  const Instruction *CxtI,
4624                                                  const DominatorTree *DT) {
4625   // If LHS and RHS each have at least two sign bits, the subtraction
4626   // cannot overflow.
4627   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4628       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4629     return OverflowResult::NeverOverflows;
4630 
4631   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4632       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4633   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4634       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4635   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4636 }
4637 
4638 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4639                                      const DominatorTree &DT) {
4640   SmallVector<const BranchInst *, 2> GuardingBranches;
4641   SmallVector<const ExtractValueInst *, 2> Results;
4642 
4643   for (const User *U : WO->users()) {
4644     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4645       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4646 
4647       if (EVI->getIndices()[0] == 0)
4648         Results.push_back(EVI);
4649       else {
4650         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4651 
4652         for (const auto *U : EVI->users())
4653           if (const auto *B = dyn_cast<BranchInst>(U)) {
4654             assert(B->isConditional() && "How else is it using an i1?");
4655             GuardingBranches.push_back(B);
4656           }
4657       }
4658     } else {
4659       // We are using the aggregate directly in a way we don't want to analyze
4660       // here (storing it to a global, say).
4661       return false;
4662     }
4663   }
4664 
4665   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4666     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4667     if (!NoWrapEdge.isSingleEdge())
4668       return false;
4669 
4670     // Check if all users of the add are provably no-wrap.
4671     for (const auto *Result : Results) {
4672       // If the extractvalue itself is not executed on overflow, the we don't
4673       // need to check each use separately, since domination is transitive.
4674       if (DT.dominates(NoWrapEdge, Result->getParent()))
4675         continue;
4676 
4677       for (auto &RU : Result->uses())
4678         if (!DT.dominates(NoWrapEdge, RU))
4679           return false;
4680     }
4681 
4682     return true;
4683   };
4684 
4685   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4686 }
4687 
4688 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly) {
4689   // See whether I has flags that may create poison
4690   if (const auto *OvOp = dyn_cast<OverflowingBinaryOperator>(Op)) {
4691     if (OvOp->hasNoSignedWrap() || OvOp->hasNoUnsignedWrap())
4692       return true;
4693   }
4694   if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(Op))
4695     if (ExactOp->isExact())
4696       return true;
4697   if (const auto *FP = dyn_cast<FPMathOperator>(Op)) {
4698     auto FMF = FP->getFastMathFlags();
4699     if (FMF.noNaNs() || FMF.noInfs())
4700       return true;
4701   }
4702 
4703   unsigned Opcode = Op->getOpcode();
4704 
4705   // Check whether opcode is a poison/undef-generating operation
4706   switch (Opcode) {
4707   case Instruction::Shl:
4708   case Instruction::AShr:
4709   case Instruction::LShr: {
4710     // Shifts return poison if shiftwidth is larger than the bitwidth.
4711     if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
4712       SmallVector<Constant *, 4> ShiftAmounts;
4713       if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
4714         unsigned NumElts = FVTy->getNumElements();
4715         for (unsigned i = 0; i < NumElts; ++i)
4716           ShiftAmounts.push_back(C->getAggregateElement(i));
4717       } else if (isa<ScalableVectorType>(C->getType()))
4718         return true; // Can't tell, just return true to be safe
4719       else
4720         ShiftAmounts.push_back(C);
4721 
4722       bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
4723         auto *CI = dyn_cast<ConstantInt>(C);
4724         return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
4725       });
4726       return !Safe;
4727     }
4728     return true;
4729   }
4730   case Instruction::FPToSI:
4731   case Instruction::FPToUI:
4732     // fptosi/ui yields poison if the resulting value does not fit in the
4733     // destination type.
4734     return true;
4735   case Instruction::Call:
4736   case Instruction::CallBr:
4737   case Instruction::Invoke: {
4738     const auto *CB = cast<CallBase>(Op);
4739     return !CB->hasRetAttr(Attribute::NoUndef);
4740   }
4741   case Instruction::InsertElement:
4742   case Instruction::ExtractElement: {
4743     // If index exceeds the length of the vector, it returns poison
4744     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
4745     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
4746     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
4747     if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
4748       return true;
4749     return false;
4750   }
4751   case Instruction::ShuffleVector: {
4752     // shufflevector may return undef.
4753     if (PoisonOnly)
4754       return false;
4755     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
4756                              ? cast<ConstantExpr>(Op)->getShuffleMask()
4757                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
4758     return is_contained(Mask, UndefMaskElem);
4759   }
4760   case Instruction::FNeg:
4761   case Instruction::PHI:
4762   case Instruction::Select:
4763   case Instruction::URem:
4764   case Instruction::SRem:
4765   case Instruction::ExtractValue:
4766   case Instruction::InsertValue:
4767   case Instruction::Freeze:
4768   case Instruction::ICmp:
4769   case Instruction::FCmp:
4770     return false;
4771   case Instruction::GetElementPtr: {
4772     const auto *GEP = cast<GEPOperator>(Op);
4773     return GEP->isInBounds();
4774   }
4775   default: {
4776     const auto *CE = dyn_cast<ConstantExpr>(Op);
4777     if (isa<CastInst>(Op) || (CE && CE->isCast()))
4778       return false;
4779     else if (Instruction::isBinaryOp(Opcode))
4780       return false;
4781     // Be conservative and return true.
4782     return true;
4783   }
4784   }
4785 }
4786 
4787 bool llvm::canCreateUndefOrPoison(const Operator *Op) {
4788   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false);
4789 }
4790 
4791 bool llvm::canCreatePoison(const Operator *Op) {
4792   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true);
4793 }
4794 
4795 static bool programUndefinedIfUndefOrPoison(const Value *V,
4796                                             bool PoisonOnly);
4797 
4798 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
4799                                              AssumptionCache *AC,
4800                                              const Instruction *CtxI,
4801                                              const DominatorTree *DT,
4802                                              unsigned Depth, bool PoisonOnly) {
4803   if (Depth >= MaxAnalysisRecursionDepth)
4804     return false;
4805 
4806   if (isa<MetadataAsValue>(V))
4807     return false;
4808 
4809   if (const auto *A = dyn_cast<Argument>(V)) {
4810     if (A->hasAttribute(Attribute::NoUndef))
4811       return true;
4812   }
4813 
4814   if (auto *C = dyn_cast<Constant>(V)) {
4815     if (isa<UndefValue>(C))
4816       return PoisonOnly;
4817 
4818     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
4819         isa<ConstantPointerNull>(C) || isa<Function>(C))
4820       return true;
4821 
4822     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
4823       return (PoisonOnly || !C->containsUndefElement()) &&
4824              !C->containsConstantExpression();
4825   }
4826 
4827   // Strip cast operations from a pointer value.
4828   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
4829   // inbounds with zero offset. To guarantee that the result isn't poison, the
4830   // stripped pointer is checked as it has to be pointing into an allocated
4831   // object or be null `null` to ensure `inbounds` getelement pointers with a
4832   // zero offset could not produce poison.
4833   // It can strip off addrspacecast that do not change bit representation as
4834   // well. We believe that such addrspacecast is equivalent to no-op.
4835   auto *StrippedV = V->stripPointerCastsSameRepresentation();
4836   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
4837       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
4838     return true;
4839 
4840   auto OpCheck = [&](const Value *V) {
4841     return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
4842                                             PoisonOnly);
4843   };
4844 
4845   if (auto *Opr = dyn_cast<Operator>(V)) {
4846     // If the value is a freeze instruction, then it can never
4847     // be undef or poison.
4848     if (isa<FreezeInst>(V))
4849       return true;
4850 
4851     if (const auto *CB = dyn_cast<CallBase>(V)) {
4852       if (CB->hasRetAttr(Attribute::NoUndef))
4853         return true;
4854     }
4855 
4856     if (const auto *PN = dyn_cast<PHINode>(V)) {
4857       unsigned Num = PN->getNumIncomingValues();
4858       bool IsWellDefined = true;
4859       for (unsigned i = 0; i < Num; ++i) {
4860         auto *TI = PN->getIncomingBlock(i)->getTerminator();
4861         if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
4862                                               DT, Depth + 1, PoisonOnly)) {
4863           IsWellDefined = false;
4864           break;
4865         }
4866       }
4867       if (IsWellDefined)
4868         return true;
4869     } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
4870       return true;
4871   }
4872 
4873   if (auto *I = dyn_cast<LoadInst>(V))
4874     if (I->getMetadata(LLVMContext::MD_noundef))
4875       return true;
4876 
4877   if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
4878     return true;
4879 
4880   // CxtI may be null or a cloned instruction.
4881   if (!CtxI || !CtxI->getParent() || !DT)
4882     return false;
4883 
4884   auto *DNode = DT->getNode(CtxI->getParent());
4885   if (!DNode)
4886     // Unreachable block
4887     return false;
4888 
4889   // If V is used as a branch condition before reaching CtxI, V cannot be
4890   // undef or poison.
4891   //   br V, BB1, BB2
4892   // BB1:
4893   //   CtxI ; V cannot be undef or poison here
4894   auto *Dominator = DNode->getIDom();
4895   while (Dominator) {
4896     auto *TI = Dominator->getBlock()->getTerminator();
4897 
4898     Value *Cond = nullptr;
4899     if (auto BI = dyn_cast<BranchInst>(TI)) {
4900       if (BI->isConditional())
4901         Cond = BI->getCondition();
4902     } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
4903       Cond = SI->getCondition();
4904     }
4905 
4906     if (Cond) {
4907       if (Cond == V)
4908         return true;
4909       else if (PoisonOnly && isa<Operator>(Cond)) {
4910         // For poison, we can analyze further
4911         auto *Opr = cast<Operator>(Cond);
4912         if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
4913           return true;
4914       }
4915     }
4916 
4917     Dominator = Dominator->getIDom();
4918   }
4919 
4920   SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NoUndef};
4921   if (getKnowledgeValidInContext(V, AttrKinds, CtxI, DT, AC))
4922     return true;
4923 
4924   return false;
4925 }
4926 
4927 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
4928                                             const Instruction *CtxI,
4929                                             const DominatorTree *DT,
4930                                             unsigned Depth) {
4931   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
4932 }
4933 
4934 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
4935                                      const Instruction *CtxI,
4936                                      const DominatorTree *DT, unsigned Depth) {
4937   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
4938 }
4939 
4940 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4941                                                  const DataLayout &DL,
4942                                                  AssumptionCache *AC,
4943                                                  const Instruction *CxtI,
4944                                                  const DominatorTree *DT) {
4945   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4946                                        Add, DL, AC, CxtI, DT);
4947 }
4948 
4949 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4950                                                  const Value *RHS,
4951                                                  const DataLayout &DL,
4952                                                  AssumptionCache *AC,
4953                                                  const Instruction *CxtI,
4954                                                  const DominatorTree *DT) {
4955   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4956 }
4957 
4958 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4959   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
4960   // of time because it's possible for another thread to interfere with it for an
4961   // arbitrary length of time, but programs aren't allowed to rely on that.
4962 
4963   // If there is no successor, then execution can't transfer to it.
4964   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4965     return !CRI->unwindsToCaller();
4966   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4967     return !CatchSwitch->unwindsToCaller();
4968   if (isa<ResumeInst>(I))
4969     return false;
4970   if (isa<ReturnInst>(I))
4971     return false;
4972   if (isa<UnreachableInst>(I))
4973     return false;
4974 
4975   // Calls can throw, or contain an infinite loop, or kill the process.
4976   if (const auto *CB = dyn_cast<CallBase>(I)) {
4977     // Call sites that throw have implicit non-local control flow.
4978     if (!CB->doesNotThrow())
4979       return false;
4980 
4981     // A function which doens't throw and has "willreturn" attribute will
4982     // always return.
4983     if (CB->hasFnAttr(Attribute::WillReturn))
4984       return true;
4985 
4986     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4987     // etc. and thus not return.  However, LLVM already assumes that
4988     //
4989     //  - Thread exiting actions are modeled as writes to memory invisible to
4990     //    the program.
4991     //
4992     //  - Loops that don't have side effects (side effects are volatile/atomic
4993     //    stores and IO) always terminate (see http://llvm.org/PR965).
4994     //    Furthermore IO itself is also modeled as writes to memory invisible to
4995     //    the program.
4996     //
4997     // We rely on those assumptions here, and use the memory effects of the call
4998     // target as a proxy for checking that it always returns.
4999 
5000     // FIXME: This isn't aggressive enough; a call which only writes to a global
5001     // is guaranteed to return.
5002     return CB->onlyReadsMemory() || CB->onlyAccessesArgMemory();
5003   }
5004 
5005   // Other instructions return normally.
5006   return true;
5007 }
5008 
5009 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5010   // TODO: This is slightly conservative for invoke instruction since exiting
5011   // via an exception *is* normal control for them.
5012   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
5013     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
5014       return false;
5015   return true;
5016 }
5017 
5018 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5019                                                   const Loop *L) {
5020   // The loop header is guaranteed to be executed for every iteration.
5021   //
5022   // FIXME: Relax this constraint to cover all basic blocks that are
5023   // guaranteed to be executed at every iteration.
5024   if (I->getParent() != L->getHeader()) return false;
5025 
5026   for (const Instruction &LI : *L->getHeader()) {
5027     if (&LI == I) return true;
5028     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5029   }
5030   llvm_unreachable("Instruction not contained in its own parent basic block.");
5031 }
5032 
5033 bool llvm::propagatesPoison(const Operator *I) {
5034   switch (I->getOpcode()) {
5035   case Instruction::Freeze:
5036   case Instruction::Select:
5037   case Instruction::PHI:
5038   case Instruction::Call:
5039   case Instruction::Invoke:
5040     return false;
5041   case Instruction::ICmp:
5042   case Instruction::FCmp:
5043   case Instruction::GetElementPtr:
5044     return true;
5045   default:
5046     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5047       return true;
5048 
5049     // Be conservative and return false.
5050     return false;
5051   }
5052 }
5053 
5054 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5055                                      SmallPtrSetImpl<const Value *> &Operands) {
5056   switch (I->getOpcode()) {
5057     case Instruction::Store:
5058       Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5059       break;
5060 
5061     case Instruction::Load:
5062       Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5063       break;
5064 
5065     case Instruction::AtomicCmpXchg:
5066       Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5067       break;
5068 
5069     case Instruction::AtomicRMW:
5070       Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5071       break;
5072 
5073     case Instruction::UDiv:
5074     case Instruction::SDiv:
5075     case Instruction::URem:
5076     case Instruction::SRem:
5077       Operands.insert(I->getOperand(1));
5078       break;
5079 
5080     case Instruction::Call:
5081     case Instruction::Invoke: {
5082       const CallBase *CB = cast<CallBase>(I);
5083       if (CB->isIndirectCall())
5084         Operands.insert(CB->getCalledOperand());
5085       for (unsigned i = 0; i < CB->arg_size(); ++i) {
5086         if (CB->paramHasAttr(i, Attribute::NoUndef))
5087           Operands.insert(CB->getArgOperand(i));
5088       }
5089       break;
5090     }
5091 
5092     default:
5093       break;
5094   }
5095 }
5096 
5097 bool llvm::mustTriggerUB(const Instruction *I,
5098                          const SmallSet<const Value *, 16>& KnownPoison) {
5099   SmallPtrSet<const Value *, 4> NonPoisonOps;
5100   getGuaranteedNonPoisonOps(I, NonPoisonOps);
5101 
5102   for (const auto *V : NonPoisonOps)
5103     if (KnownPoison.count(V))
5104       return true;
5105 
5106   return false;
5107 }
5108 
5109 static bool programUndefinedIfUndefOrPoison(const Value *V,
5110                                             bool PoisonOnly) {
5111   // We currently only look for uses of values within the same basic
5112   // block, as that makes it easier to guarantee that the uses will be
5113   // executed given that Inst is executed.
5114   //
5115   // FIXME: Expand this to consider uses beyond the same basic block. To do
5116   // this, look out for the distinction between post-dominance and strong
5117   // post-dominance.
5118   const BasicBlock *BB = nullptr;
5119   BasicBlock::const_iterator Begin;
5120   if (const auto *Inst = dyn_cast<Instruction>(V)) {
5121     BB = Inst->getParent();
5122     Begin = Inst->getIterator();
5123     Begin++;
5124   } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5125     BB = &Arg->getParent()->getEntryBlock();
5126     Begin = BB->begin();
5127   } else {
5128     return false;
5129   }
5130 
5131   BasicBlock::const_iterator End = BB->end();
5132 
5133   if (!PoisonOnly) {
5134     // Be conservative & just check whether a value is passed to a noundef
5135     // argument.
5136     // Instructions that raise UB with a poison operand are well-defined
5137     // or have unclear semantics when the input is partially undef.
5138     // For example, 'udiv x, (undef | 1)' isn't UB.
5139 
5140     for (auto &I : make_range(Begin, End)) {
5141       if (const auto *CB = dyn_cast<CallBase>(&I)) {
5142         for (unsigned i = 0; i < CB->arg_size(); ++i) {
5143           if (CB->paramHasAttr(i, Attribute::NoUndef) &&
5144               CB->getArgOperand(i) == V)
5145             return true;
5146         }
5147       }
5148       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5149         break;
5150     }
5151     return false;
5152   }
5153 
5154   // Set of instructions that we have proved will yield poison if Inst
5155   // does.
5156   SmallSet<const Value *, 16> YieldsPoison;
5157   SmallSet<const BasicBlock *, 4> Visited;
5158 
5159   YieldsPoison.insert(V);
5160   auto Propagate = [&](const User *User) {
5161     if (propagatesPoison(cast<Operator>(User)))
5162       YieldsPoison.insert(User);
5163   };
5164   for_each(V->users(), Propagate);
5165   Visited.insert(BB);
5166 
5167   unsigned Iter = 0;
5168   while (Iter++ < MaxAnalysisRecursionDepth) {
5169     for (auto &I : make_range(Begin, End)) {
5170       if (mustTriggerUB(&I, YieldsPoison))
5171         return true;
5172       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5173         return false;
5174 
5175       // Mark poison that propagates from I through uses of I.
5176       if (YieldsPoison.count(&I))
5177         for_each(I.users(), Propagate);
5178     }
5179 
5180     if (auto *NextBB = BB->getSingleSuccessor()) {
5181       if (Visited.insert(NextBB).second) {
5182         BB = NextBB;
5183         Begin = BB->getFirstNonPHI()->getIterator();
5184         End = BB->end();
5185         continue;
5186       }
5187     }
5188 
5189     break;
5190   }
5191   return false;
5192 }
5193 
5194 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5195   return ::programUndefinedIfUndefOrPoison(Inst, false);
5196 }
5197 
5198 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5199   return ::programUndefinedIfUndefOrPoison(Inst, true);
5200 }
5201 
5202 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5203   if (FMF.noNaNs())
5204     return true;
5205 
5206   if (auto *C = dyn_cast<ConstantFP>(V))
5207     return !C->isNaN();
5208 
5209   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5210     if (!C->getElementType()->isFloatingPointTy())
5211       return false;
5212     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5213       if (C->getElementAsAPFloat(I).isNaN())
5214         return false;
5215     }
5216     return true;
5217   }
5218 
5219   if (isa<ConstantAggregateZero>(V))
5220     return true;
5221 
5222   return false;
5223 }
5224 
5225 static bool isKnownNonZero(const Value *V) {
5226   if (auto *C = dyn_cast<ConstantFP>(V))
5227     return !C->isZero();
5228 
5229   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5230     if (!C->getElementType()->isFloatingPointTy())
5231       return false;
5232     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5233       if (C->getElementAsAPFloat(I).isZero())
5234         return false;
5235     }
5236     return true;
5237   }
5238 
5239   return false;
5240 }
5241 
5242 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5243 /// Given non-min/max outer cmp/select from the clamp pattern this
5244 /// function recognizes if it can be substitued by a "canonical" min/max
5245 /// pattern.
5246 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5247                                                Value *CmpLHS, Value *CmpRHS,
5248                                                Value *TrueVal, Value *FalseVal,
5249                                                Value *&LHS, Value *&RHS) {
5250   // Try to match
5251   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5252   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5253   // and return description of the outer Max/Min.
5254 
5255   // First, check if select has inverse order:
5256   if (CmpRHS == FalseVal) {
5257     std::swap(TrueVal, FalseVal);
5258     Pred = CmpInst::getInversePredicate(Pred);
5259   }
5260 
5261   // Assume success now. If there's no match, callers should not use these anyway.
5262   LHS = TrueVal;
5263   RHS = FalseVal;
5264 
5265   const APFloat *FC1;
5266   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5267     return {SPF_UNKNOWN, SPNB_NA, false};
5268 
5269   const APFloat *FC2;
5270   switch (Pred) {
5271   case CmpInst::FCMP_OLT:
5272   case CmpInst::FCMP_OLE:
5273   case CmpInst::FCMP_ULT:
5274   case CmpInst::FCMP_ULE:
5275     if (match(FalseVal,
5276               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5277                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5278         *FC1 < *FC2)
5279       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5280     break;
5281   case CmpInst::FCMP_OGT:
5282   case CmpInst::FCMP_OGE:
5283   case CmpInst::FCMP_UGT:
5284   case CmpInst::FCMP_UGE:
5285     if (match(FalseVal,
5286               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5287                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5288         *FC1 > *FC2)
5289       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5290     break;
5291   default:
5292     break;
5293   }
5294 
5295   return {SPF_UNKNOWN, SPNB_NA, false};
5296 }
5297 
5298 /// Recognize variations of:
5299 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5300 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5301                                       Value *CmpLHS, Value *CmpRHS,
5302                                       Value *TrueVal, Value *FalseVal) {
5303   // Swap the select operands and predicate to match the patterns below.
5304   if (CmpRHS != TrueVal) {
5305     Pred = ICmpInst::getSwappedPredicate(Pred);
5306     std::swap(TrueVal, FalseVal);
5307   }
5308   const APInt *C1;
5309   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5310     const APInt *C2;
5311     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5312     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5313         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5314       return {SPF_SMAX, SPNB_NA, false};
5315 
5316     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5317     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5318         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5319       return {SPF_SMIN, SPNB_NA, false};
5320 
5321     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5322     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5323         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5324       return {SPF_UMAX, SPNB_NA, false};
5325 
5326     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5327     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5328         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5329       return {SPF_UMIN, SPNB_NA, false};
5330   }
5331   return {SPF_UNKNOWN, SPNB_NA, false};
5332 }
5333 
5334 /// Recognize variations of:
5335 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5336 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5337                                                Value *CmpLHS, Value *CmpRHS,
5338                                                Value *TVal, Value *FVal,
5339                                                unsigned Depth) {
5340   // TODO: Allow FP min/max with nnan/nsz.
5341   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5342 
5343   Value *A = nullptr, *B = nullptr;
5344   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5345   if (!SelectPatternResult::isMinOrMax(L.Flavor))
5346     return {SPF_UNKNOWN, SPNB_NA, false};
5347 
5348   Value *C = nullptr, *D = nullptr;
5349   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5350   if (L.Flavor != R.Flavor)
5351     return {SPF_UNKNOWN, SPNB_NA, false};
5352 
5353   // We have something like: x Pred y ? min(a, b) : min(c, d).
5354   // Try to match the compare to the min/max operations of the select operands.
5355   // First, make sure we have the right compare predicate.
5356   switch (L.Flavor) {
5357   case SPF_SMIN:
5358     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5359       Pred = ICmpInst::getSwappedPredicate(Pred);
5360       std::swap(CmpLHS, CmpRHS);
5361     }
5362     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5363       break;
5364     return {SPF_UNKNOWN, SPNB_NA, false};
5365   case SPF_SMAX:
5366     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5367       Pred = ICmpInst::getSwappedPredicate(Pred);
5368       std::swap(CmpLHS, CmpRHS);
5369     }
5370     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5371       break;
5372     return {SPF_UNKNOWN, SPNB_NA, false};
5373   case SPF_UMIN:
5374     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5375       Pred = ICmpInst::getSwappedPredicate(Pred);
5376       std::swap(CmpLHS, CmpRHS);
5377     }
5378     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5379       break;
5380     return {SPF_UNKNOWN, SPNB_NA, false};
5381   case SPF_UMAX:
5382     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5383       Pred = ICmpInst::getSwappedPredicate(Pred);
5384       std::swap(CmpLHS, CmpRHS);
5385     }
5386     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5387       break;
5388     return {SPF_UNKNOWN, SPNB_NA, false};
5389   default:
5390     return {SPF_UNKNOWN, SPNB_NA, false};
5391   }
5392 
5393   // If there is a common operand in the already matched min/max and the other
5394   // min/max operands match the compare operands (either directly or inverted),
5395   // then this is min/max of the same flavor.
5396 
5397   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5398   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5399   if (D == B) {
5400     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5401                                          match(A, m_Not(m_Specific(CmpRHS)))))
5402       return {L.Flavor, SPNB_NA, false};
5403   }
5404   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5405   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5406   if (C == B) {
5407     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5408                                          match(A, m_Not(m_Specific(CmpRHS)))))
5409       return {L.Flavor, SPNB_NA, false};
5410   }
5411   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5412   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5413   if (D == A) {
5414     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5415                                          match(B, m_Not(m_Specific(CmpRHS)))))
5416       return {L.Flavor, SPNB_NA, false};
5417   }
5418   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5419   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5420   if (C == A) {
5421     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5422                                          match(B, m_Not(m_Specific(CmpRHS)))))
5423       return {L.Flavor, SPNB_NA, false};
5424   }
5425 
5426   return {SPF_UNKNOWN, SPNB_NA, false};
5427 }
5428 
5429 /// If the input value is the result of a 'not' op, constant integer, or vector
5430 /// splat of a constant integer, return the bitwise-not source value.
5431 /// TODO: This could be extended to handle non-splat vector integer constants.
5432 static Value *getNotValue(Value *V) {
5433   Value *NotV;
5434   if (match(V, m_Not(m_Value(NotV))))
5435     return NotV;
5436 
5437   const APInt *C;
5438   if (match(V, m_APInt(C)))
5439     return ConstantInt::get(V->getType(), ~(*C));
5440 
5441   return nullptr;
5442 }
5443 
5444 /// Match non-obvious integer minimum and maximum sequences.
5445 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5446                                        Value *CmpLHS, Value *CmpRHS,
5447                                        Value *TrueVal, Value *FalseVal,
5448                                        Value *&LHS, Value *&RHS,
5449                                        unsigned Depth) {
5450   // Assume success. If there's no match, callers should not use these anyway.
5451   LHS = TrueVal;
5452   RHS = FalseVal;
5453 
5454   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5455   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5456     return SPR;
5457 
5458   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5459   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5460     return SPR;
5461 
5462   // Look through 'not' ops to find disguised min/max.
5463   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5464   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5465   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5466     switch (Pred) {
5467     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5468     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5469     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5470     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5471     default: break;
5472     }
5473   }
5474 
5475   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5476   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5477   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5478     switch (Pred) {
5479     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
5480     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
5481     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
5482     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
5483     default: break;
5484     }
5485   }
5486 
5487   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
5488     return {SPF_UNKNOWN, SPNB_NA, false};
5489 
5490   // Z = X -nsw Y
5491   // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
5492   // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
5493   if (match(TrueVal, m_Zero()) &&
5494       match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5495     return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
5496 
5497   // Z = X -nsw Y
5498   // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
5499   // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
5500   if (match(FalseVal, m_Zero()) &&
5501       match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
5502     return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
5503 
5504   const APInt *C1;
5505   if (!match(CmpRHS, m_APInt(C1)))
5506     return {SPF_UNKNOWN, SPNB_NA, false};
5507 
5508   // An unsigned min/max can be written with a signed compare.
5509   const APInt *C2;
5510   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
5511       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
5512     // Is the sign bit set?
5513     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
5514     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
5515     if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
5516         C2->isMaxSignedValue())
5517       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5518 
5519     // Is the sign bit clear?
5520     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
5521     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
5522     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
5523         C2->isMinSignedValue())
5524       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
5525   }
5526 
5527   return {SPF_UNKNOWN, SPNB_NA, false};
5528 }
5529 
5530 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
5531   assert(X && Y && "Invalid operand");
5532 
5533   // X = sub (0, Y) || X = sub nsw (0, Y)
5534   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
5535       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
5536     return true;
5537 
5538   // Y = sub (0, X) || Y = sub nsw (0, X)
5539   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
5540       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
5541     return true;
5542 
5543   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
5544   Value *A, *B;
5545   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
5546                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
5547          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
5548                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
5549 }
5550 
5551 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
5552                                               FastMathFlags FMF,
5553                                               Value *CmpLHS, Value *CmpRHS,
5554                                               Value *TrueVal, Value *FalseVal,
5555                                               Value *&LHS, Value *&RHS,
5556                                               unsigned Depth) {
5557   if (CmpInst::isFPPredicate(Pred)) {
5558     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
5559     // 0.0 operand, set the compare's 0.0 operands to that same value for the
5560     // purpose of identifying min/max. Disregard vector constants with undefined
5561     // elements because those can not be back-propagated for analysis.
5562     Value *OutputZeroVal = nullptr;
5563     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
5564         !cast<Constant>(TrueVal)->containsUndefElement())
5565       OutputZeroVal = TrueVal;
5566     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
5567              !cast<Constant>(FalseVal)->containsUndefElement())
5568       OutputZeroVal = FalseVal;
5569 
5570     if (OutputZeroVal) {
5571       if (match(CmpLHS, m_AnyZeroFP()))
5572         CmpLHS = OutputZeroVal;
5573       if (match(CmpRHS, m_AnyZeroFP()))
5574         CmpRHS = OutputZeroVal;
5575     }
5576   }
5577 
5578   LHS = CmpLHS;
5579   RHS = CmpRHS;
5580 
5581   // Signed zero may return inconsistent results between implementations.
5582   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
5583   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
5584   // Therefore, we behave conservatively and only proceed if at least one of the
5585   // operands is known to not be zero or if we don't care about signed zero.
5586   switch (Pred) {
5587   default: break;
5588   // FIXME: Include OGT/OLT/UGT/ULT.
5589   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
5590   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
5591     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5592         !isKnownNonZero(CmpRHS))
5593       return {SPF_UNKNOWN, SPNB_NA, false};
5594   }
5595 
5596   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
5597   bool Ordered = false;
5598 
5599   // When given one NaN and one non-NaN input:
5600   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
5601   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
5602   //     ordered comparison fails), which could be NaN or non-NaN.
5603   // so here we discover exactly what NaN behavior is required/accepted.
5604   if (CmpInst::isFPPredicate(Pred)) {
5605     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
5606     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
5607 
5608     if (LHSSafe && RHSSafe) {
5609       // Both operands are known non-NaN.
5610       NaNBehavior = SPNB_RETURNS_ANY;
5611     } else if (CmpInst::isOrdered(Pred)) {
5612       // An ordered comparison will return false when given a NaN, so it
5613       // returns the RHS.
5614       Ordered = true;
5615       if (LHSSafe)
5616         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
5617         NaNBehavior = SPNB_RETURNS_NAN;
5618       else if (RHSSafe)
5619         NaNBehavior = SPNB_RETURNS_OTHER;
5620       else
5621         // Completely unsafe.
5622         return {SPF_UNKNOWN, SPNB_NA, false};
5623     } else {
5624       Ordered = false;
5625       // An unordered comparison will return true when given a NaN, so it
5626       // returns the LHS.
5627       if (LHSSafe)
5628         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
5629         NaNBehavior = SPNB_RETURNS_OTHER;
5630       else if (RHSSafe)
5631         NaNBehavior = SPNB_RETURNS_NAN;
5632       else
5633         // Completely unsafe.
5634         return {SPF_UNKNOWN, SPNB_NA, false};
5635     }
5636   }
5637 
5638   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
5639     std::swap(CmpLHS, CmpRHS);
5640     Pred = CmpInst::getSwappedPredicate(Pred);
5641     if (NaNBehavior == SPNB_RETURNS_NAN)
5642       NaNBehavior = SPNB_RETURNS_OTHER;
5643     else if (NaNBehavior == SPNB_RETURNS_OTHER)
5644       NaNBehavior = SPNB_RETURNS_NAN;
5645     Ordered = !Ordered;
5646   }
5647 
5648   // ([if]cmp X, Y) ? X : Y
5649   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
5650     switch (Pred) {
5651     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
5652     case ICmpInst::ICMP_UGT:
5653     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
5654     case ICmpInst::ICMP_SGT:
5655     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
5656     case ICmpInst::ICMP_ULT:
5657     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
5658     case ICmpInst::ICMP_SLT:
5659     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
5660     case FCmpInst::FCMP_UGT:
5661     case FCmpInst::FCMP_UGE:
5662     case FCmpInst::FCMP_OGT:
5663     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
5664     case FCmpInst::FCMP_ULT:
5665     case FCmpInst::FCMP_ULE:
5666     case FCmpInst::FCMP_OLT:
5667     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
5668     }
5669   }
5670 
5671   if (isKnownNegation(TrueVal, FalseVal)) {
5672     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
5673     // match against either LHS or sext(LHS).
5674     auto MaybeSExtCmpLHS =
5675         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
5676     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
5677     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
5678     if (match(TrueVal, MaybeSExtCmpLHS)) {
5679       // Set the return values. If the compare uses the negated value (-X >s 0),
5680       // swap the return values because the negated value is always 'RHS'.
5681       LHS = TrueVal;
5682       RHS = FalseVal;
5683       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
5684         std::swap(LHS, RHS);
5685 
5686       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
5687       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
5688       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5689         return {SPF_ABS, SPNB_NA, false};
5690 
5691       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
5692       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
5693         return {SPF_ABS, SPNB_NA, false};
5694 
5695       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
5696       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
5697       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5698         return {SPF_NABS, SPNB_NA, false};
5699     }
5700     else if (match(FalseVal, MaybeSExtCmpLHS)) {
5701       // Set the return values. If the compare uses the negated value (-X >s 0),
5702       // swap the return values because the negated value is always 'RHS'.
5703       LHS = FalseVal;
5704       RHS = TrueVal;
5705       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
5706         std::swap(LHS, RHS);
5707 
5708       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
5709       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
5710       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
5711         return {SPF_NABS, SPNB_NA, false};
5712 
5713       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
5714       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
5715       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
5716         return {SPF_ABS, SPNB_NA, false};
5717     }
5718   }
5719 
5720   if (CmpInst::isIntPredicate(Pred))
5721     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
5722 
5723   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
5724   // may return either -0.0 or 0.0, so fcmp/select pair has stricter
5725   // semantics than minNum. Be conservative in such case.
5726   if (NaNBehavior != SPNB_RETURNS_ANY ||
5727       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
5728        !isKnownNonZero(CmpRHS)))
5729     return {SPF_UNKNOWN, SPNB_NA, false};
5730 
5731   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
5732 }
5733 
5734 /// Helps to match a select pattern in case of a type mismatch.
5735 ///
5736 /// The function processes the case when type of true and false values of a
5737 /// select instruction differs from type of the cmp instruction operands because
5738 /// of a cast instruction. The function checks if it is legal to move the cast
5739 /// operation after "select". If yes, it returns the new second value of
5740 /// "select" (with the assumption that cast is moved):
5741 /// 1. As operand of cast instruction when both values of "select" are same cast
5742 /// instructions.
5743 /// 2. As restored constant (by applying reverse cast operation) when the first
5744 /// value of the "select" is a cast operation and the second value is a
5745 /// constant.
5746 /// NOTE: We return only the new second value because the first value could be
5747 /// accessed as operand of cast instruction.
5748 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
5749                               Instruction::CastOps *CastOp) {
5750   auto *Cast1 = dyn_cast<CastInst>(V1);
5751   if (!Cast1)
5752     return nullptr;
5753 
5754   *CastOp = Cast1->getOpcode();
5755   Type *SrcTy = Cast1->getSrcTy();
5756   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5757     // If V1 and V2 are both the same cast from the same type, look through V1.
5758     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5759       return Cast2->getOperand(0);
5760     return nullptr;
5761   }
5762 
5763   auto *C = dyn_cast<Constant>(V2);
5764   if (!C)
5765     return nullptr;
5766 
5767   Constant *CastedTo = nullptr;
5768   switch (*CastOp) {
5769   case Instruction::ZExt:
5770     if (CmpI->isUnsigned())
5771       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5772     break;
5773   case Instruction::SExt:
5774     if (CmpI->isSigned())
5775       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5776     break;
5777   case Instruction::Trunc:
5778     Constant *CmpConst;
5779     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5780         CmpConst->getType() == SrcTy) {
5781       // Here we have the following case:
5782       //
5783       //   %cond = cmp iN %x, CmpConst
5784       //   %tr = trunc iN %x to iK
5785       //   %narrowsel = select i1 %cond, iK %t, iK C
5786       //
5787       // We can always move trunc after select operation:
5788       //
5789       //   %cond = cmp iN %x, CmpConst
5790       //   %widesel = select i1 %cond, iN %x, iN CmpConst
5791       //   %tr = trunc iN %widesel to iK
5792       //
5793       // Note that C could be extended in any way because we don't care about
5794       // upper bits after truncation. It can't be abs pattern, because it would
5795       // look like:
5796       //
5797       //   select i1 %cond, x, -x.
5798       //
5799       // So only min/max pattern could be matched. Such match requires widened C
5800       // == CmpConst. That is why set widened C = CmpConst, condition trunc
5801       // CmpConst == C is checked below.
5802       CastedTo = CmpConst;
5803     } else {
5804       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5805     }
5806     break;
5807   case Instruction::FPTrunc:
5808     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5809     break;
5810   case Instruction::FPExt:
5811     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5812     break;
5813   case Instruction::FPToUI:
5814     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5815     break;
5816   case Instruction::FPToSI:
5817     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5818     break;
5819   case Instruction::UIToFP:
5820     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5821     break;
5822   case Instruction::SIToFP:
5823     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5824     break;
5825   default:
5826     break;
5827   }
5828 
5829   if (!CastedTo)
5830     return nullptr;
5831 
5832   // Make sure the cast doesn't lose any information.
5833   Constant *CastedBack =
5834       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5835   if (CastedBack != C)
5836     return nullptr;
5837 
5838   return CastedTo;
5839 }
5840 
5841 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5842                                              Instruction::CastOps *CastOp,
5843                                              unsigned Depth) {
5844   if (Depth >= MaxAnalysisRecursionDepth)
5845     return {SPF_UNKNOWN, SPNB_NA, false};
5846 
5847   SelectInst *SI = dyn_cast<SelectInst>(V);
5848   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5849 
5850   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5851   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5852 
5853   Value *TrueVal = SI->getTrueValue();
5854   Value *FalseVal = SI->getFalseValue();
5855 
5856   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5857                                             CastOp, Depth);
5858 }
5859 
5860 SelectPatternResult llvm::matchDecomposedSelectPattern(
5861     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5862     Instruction::CastOps *CastOp, unsigned Depth) {
5863   CmpInst::Predicate Pred = CmpI->getPredicate();
5864   Value *CmpLHS = CmpI->getOperand(0);
5865   Value *CmpRHS = CmpI->getOperand(1);
5866   FastMathFlags FMF;
5867   if (isa<FPMathOperator>(CmpI))
5868     FMF = CmpI->getFastMathFlags();
5869 
5870   // Bail out early.
5871   if (CmpI->isEquality())
5872     return {SPF_UNKNOWN, SPNB_NA, false};
5873 
5874   // Deal with type mismatches.
5875   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5876     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5877       // If this is a potential fmin/fmax with a cast to integer, then ignore
5878       // -0.0 because there is no corresponding integer value.
5879       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5880         FMF.setNoSignedZeros();
5881       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5882                                   cast<CastInst>(TrueVal)->getOperand(0), C,
5883                                   LHS, RHS, Depth);
5884     }
5885     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5886       // If this is a potential fmin/fmax with a cast to integer, then ignore
5887       // -0.0 because there is no corresponding integer value.
5888       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5889         FMF.setNoSignedZeros();
5890       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5891                                   C, cast<CastInst>(FalseVal)->getOperand(0),
5892                                   LHS, RHS, Depth);
5893     }
5894   }
5895   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5896                               LHS, RHS, Depth);
5897 }
5898 
5899 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5900   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5901   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5902   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5903   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5904   if (SPF == SPF_FMINNUM)
5905     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5906   if (SPF == SPF_FMAXNUM)
5907     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5908   llvm_unreachable("unhandled!");
5909 }
5910 
5911 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5912   if (SPF == SPF_SMIN) return SPF_SMAX;
5913   if (SPF == SPF_UMIN) return SPF_UMAX;
5914   if (SPF == SPF_SMAX) return SPF_SMIN;
5915   if (SPF == SPF_UMAX) return SPF_UMIN;
5916   llvm_unreachable("unhandled!");
5917 }
5918 
5919 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5920   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5921 }
5922 
5923 std::pair<Intrinsic::ID, bool>
5924 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
5925   // Check if VL contains select instructions that can be folded into a min/max
5926   // vector intrinsic and return the intrinsic if it is possible.
5927   // TODO: Support floating point min/max.
5928   bool AllCmpSingleUse = true;
5929   SelectPatternResult SelectPattern;
5930   SelectPattern.Flavor = SPF_UNKNOWN;
5931   if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
5932         Value *LHS, *RHS;
5933         auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
5934         if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
5935             CurrentPattern.Flavor == SPF_FMINNUM ||
5936             CurrentPattern.Flavor == SPF_FMAXNUM ||
5937             !I->getType()->isIntOrIntVectorTy())
5938           return false;
5939         if (SelectPattern.Flavor != SPF_UNKNOWN &&
5940             SelectPattern.Flavor != CurrentPattern.Flavor)
5941           return false;
5942         SelectPattern = CurrentPattern;
5943         AllCmpSingleUse &=
5944             match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
5945         return true;
5946       })) {
5947     switch (SelectPattern.Flavor) {
5948     case SPF_SMIN:
5949       return {Intrinsic::smin, AllCmpSingleUse};
5950     case SPF_UMIN:
5951       return {Intrinsic::umin, AllCmpSingleUse};
5952     case SPF_SMAX:
5953       return {Intrinsic::smax, AllCmpSingleUse};
5954     case SPF_UMAX:
5955       return {Intrinsic::umax, AllCmpSingleUse};
5956     default:
5957       llvm_unreachable("unexpected select pattern flavor");
5958     }
5959   }
5960   return {Intrinsic::not_intrinsic, false};
5961 }
5962 
5963 /// Return true if "icmp Pred LHS RHS" is always true.
5964 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5965                             const Value *RHS, const DataLayout &DL,
5966                             unsigned Depth) {
5967   assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5968   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5969     return true;
5970 
5971   switch (Pred) {
5972   default:
5973     return false;
5974 
5975   case CmpInst::ICMP_SLE: {
5976     const APInt *C;
5977 
5978     // LHS s<= LHS +_{nsw} C   if C >= 0
5979     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5980       return !C->isNegative();
5981     return false;
5982   }
5983 
5984   case CmpInst::ICMP_ULE: {
5985     const APInt *C;
5986 
5987     // LHS u<= LHS +_{nuw} C   for any C
5988     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5989       return true;
5990 
5991     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5992     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5993                                        const Value *&X,
5994                                        const APInt *&CA, const APInt *&CB) {
5995       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
5996           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
5997         return true;
5998 
5999       // If X & C == 0 then (X | C) == X +_{nuw} C
6000       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6001           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6002         KnownBits Known(CA->getBitWidth());
6003         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6004                          /*CxtI*/ nullptr, /*DT*/ nullptr);
6005         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6006           return true;
6007       }
6008 
6009       return false;
6010     };
6011 
6012     const Value *X;
6013     const APInt *CLHS, *CRHS;
6014     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6015       return CLHS->ule(*CRHS);
6016 
6017     return false;
6018   }
6019   }
6020 }
6021 
6022 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6023 /// ALHS ARHS" is true.  Otherwise, return None.
6024 static Optional<bool>
6025 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6026                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
6027                       const DataLayout &DL, unsigned Depth) {
6028   switch (Pred) {
6029   default:
6030     return None;
6031 
6032   case CmpInst::ICMP_SLT:
6033   case CmpInst::ICMP_SLE:
6034     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6035         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6036       return true;
6037     return None;
6038 
6039   case CmpInst::ICMP_ULT:
6040   case CmpInst::ICMP_ULE:
6041     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6042         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6043       return true;
6044     return None;
6045   }
6046 }
6047 
6048 /// Return true if the operands of the two compares match.  IsSwappedOps is true
6049 /// when the operands match, but are swapped.
6050 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6051                           const Value *BLHS, const Value *BRHS,
6052                           bool &IsSwappedOps) {
6053 
6054   bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6055   IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6056   return IsMatchingOps || IsSwappedOps;
6057 }
6058 
6059 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6060 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6061 /// Otherwise, return None if we can't infer anything.
6062 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6063                                                     CmpInst::Predicate BPred,
6064                                                     bool AreSwappedOps) {
6065   // Canonicalize the predicate as if the operands were not commuted.
6066   if (AreSwappedOps)
6067     BPred = ICmpInst::getSwappedPredicate(BPred);
6068 
6069   if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6070     return true;
6071   if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6072     return false;
6073 
6074   return None;
6075 }
6076 
6077 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6078 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6079 /// Otherwise, return None if we can't infer anything.
6080 static Optional<bool>
6081 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6082                                  const ConstantInt *C1,
6083                                  CmpInst::Predicate BPred,
6084                                  const ConstantInt *C2) {
6085   ConstantRange DomCR =
6086       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
6087   ConstantRange CR =
6088       ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
6089   ConstantRange Intersection = DomCR.intersectWith(CR);
6090   ConstantRange Difference = DomCR.difference(CR);
6091   if (Intersection.isEmptySet())
6092     return false;
6093   if (Difference.isEmptySet())
6094     return true;
6095   return None;
6096 }
6097 
6098 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6099 /// false.  Otherwise, return None if we can't infer anything.
6100 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6101                                          CmpInst::Predicate BPred,
6102                                          const Value *BLHS, const Value *BRHS,
6103                                          const DataLayout &DL, bool LHSIsTrue,
6104                                          unsigned Depth) {
6105   Value *ALHS = LHS->getOperand(0);
6106   Value *ARHS = LHS->getOperand(1);
6107 
6108   // The rest of the logic assumes the LHS condition is true.  If that's not the
6109   // case, invert the predicate to make it so.
6110   CmpInst::Predicate APred =
6111       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6112 
6113   // Can we infer anything when the two compares have matching operands?
6114   bool AreSwappedOps;
6115   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6116     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6117             APred, BPred, AreSwappedOps))
6118       return Implication;
6119     // No amount of additional analysis will infer the second condition, so
6120     // early exit.
6121     return None;
6122   }
6123 
6124   // Can we infer anything when the LHS operands match and the RHS operands are
6125   // constants (not necessarily matching)?
6126   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
6127     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
6128             APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
6129       return Implication;
6130     // No amount of additional analysis will infer the second condition, so
6131     // early exit.
6132     return None;
6133   }
6134 
6135   if (APred == BPred)
6136     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6137   return None;
6138 }
6139 
6140 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6141 /// false.  Otherwise, return None if we can't infer anything.  We expect the
6142 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
6143 static Optional<bool>
6144 isImpliedCondAndOr(const BinaryOperator *LHS, CmpInst::Predicate RHSPred,
6145                    const Value *RHSOp0, const Value *RHSOp1,
6146 
6147                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6148   // The LHS must be an 'or' or an 'and' instruction.
6149   assert((LHS->getOpcode() == Instruction::And ||
6150           LHS->getOpcode() == Instruction::Or) &&
6151          "Expected LHS to be 'and' or 'or'.");
6152 
6153   assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6154 
6155   // If the result of an 'or' is false, then we know both legs of the 'or' are
6156   // false.  Similarly, if the result of an 'and' is true, then we know both
6157   // legs of the 'and' are true.
6158   Value *ALHS, *ARHS;
6159   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
6160       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
6161     // FIXME: Make this non-recursion.
6162     if (Optional<bool> Implication = isImpliedCondition(
6163             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6164       return Implication;
6165     if (Optional<bool> Implication = isImpliedCondition(
6166             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6167       return Implication;
6168     return None;
6169   }
6170   return None;
6171 }
6172 
6173 Optional<bool>
6174 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6175                          const Value *RHSOp0, const Value *RHSOp1,
6176                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6177   // Bail out when we hit the limit.
6178   if (Depth == MaxAnalysisRecursionDepth)
6179     return None;
6180 
6181   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6182   // example.
6183   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6184     return None;
6185 
6186   Type *OpTy = LHS->getType();
6187   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
6188 
6189   // FIXME: Extending the code below to handle vectors.
6190   if (OpTy->isVectorTy())
6191     return None;
6192 
6193   assert(OpTy->isIntegerTy(1) && "implied by above");
6194 
6195   // Both LHS and RHS are icmps.
6196   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6197   if (LHSCmp)
6198     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6199                               Depth);
6200 
6201   /// The LHS should be an 'or' or an 'and' instruction.  We expect the RHS to
6202   /// be / an icmp. FIXME: Add support for and/or on the RHS.
6203   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
6204   if (LHSBO) {
6205     if ((LHSBO->getOpcode() == Instruction::And ||
6206          LHSBO->getOpcode() == Instruction::Or))
6207       return isImpliedCondAndOr(LHSBO, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6208                                 Depth);
6209   }
6210   return None;
6211 }
6212 
6213 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6214                                         const DataLayout &DL, bool LHSIsTrue,
6215                                         unsigned Depth) {
6216   // LHS ==> RHS by definition
6217   if (LHS == RHS)
6218     return LHSIsTrue;
6219 
6220   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
6221   if (RHSCmp)
6222     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6223                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6224                               LHSIsTrue, Depth);
6225   return None;
6226 }
6227 
6228 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6229 // condition dominating ContextI or nullptr, if no condition is found.
6230 static std::pair<Value *, bool>
6231 getDomPredecessorCondition(const Instruction *ContextI) {
6232   if (!ContextI || !ContextI->getParent())
6233     return {nullptr, false};
6234 
6235   // TODO: This is a poor/cheap way to determine dominance. Should we use a
6236   // dominator tree (eg, from a SimplifyQuery) instead?
6237   const BasicBlock *ContextBB = ContextI->getParent();
6238   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6239   if (!PredBB)
6240     return {nullptr, false};
6241 
6242   // We need a conditional branch in the predecessor.
6243   Value *PredCond;
6244   BasicBlock *TrueBB, *FalseBB;
6245   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6246     return {nullptr, false};
6247 
6248   // The branch should get simplified. Don't bother simplifying this condition.
6249   if (TrueBB == FalseBB)
6250     return {nullptr, false};
6251 
6252   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6253          "Predecessor block does not point to successor?");
6254 
6255   // Is this condition implied by the predecessor condition?
6256   return {PredCond, TrueBB == ContextBB};
6257 }
6258 
6259 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6260                                              const Instruction *ContextI,
6261                                              const DataLayout &DL) {
6262   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6263   auto PredCond = getDomPredecessorCondition(ContextI);
6264   if (PredCond.first)
6265     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6266   return None;
6267 }
6268 
6269 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6270                                              const Value *LHS, const Value *RHS,
6271                                              const Instruction *ContextI,
6272                                              const DataLayout &DL) {
6273   auto PredCond = getDomPredecessorCondition(ContextI);
6274   if (PredCond.first)
6275     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6276                               PredCond.second);
6277   return None;
6278 }
6279 
6280 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6281                               APInt &Upper, const InstrInfoQuery &IIQ) {
6282   unsigned Width = Lower.getBitWidth();
6283   const APInt *C;
6284   switch (BO.getOpcode()) {
6285   case Instruction::Add:
6286     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6287       // FIXME: If we have both nuw and nsw, we should reduce the range further.
6288       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6289         // 'add nuw x, C' produces [C, UINT_MAX].
6290         Lower = *C;
6291       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
6292         if (C->isNegative()) {
6293           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6294           Lower = APInt::getSignedMinValue(Width);
6295           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6296         } else {
6297           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6298           Lower = APInt::getSignedMinValue(Width) + *C;
6299           Upper = APInt::getSignedMaxValue(Width) + 1;
6300         }
6301       }
6302     }
6303     break;
6304 
6305   case Instruction::And:
6306     if (match(BO.getOperand(1), m_APInt(C)))
6307       // 'and x, C' produces [0, C].
6308       Upper = *C + 1;
6309     break;
6310 
6311   case Instruction::Or:
6312     if (match(BO.getOperand(1), m_APInt(C)))
6313       // 'or x, C' produces [C, UINT_MAX].
6314       Lower = *C;
6315     break;
6316 
6317   case Instruction::AShr:
6318     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6319       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6320       Lower = APInt::getSignedMinValue(Width).ashr(*C);
6321       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6322     } else if (match(BO.getOperand(0), m_APInt(C))) {
6323       unsigned ShiftAmount = Width - 1;
6324       if (!C->isNullValue() && IIQ.isExact(&BO))
6325         ShiftAmount = C->countTrailingZeros();
6326       if (C->isNegative()) {
6327         // 'ashr C, x' produces [C, C >> (Width-1)]
6328         Lower = *C;
6329         Upper = C->ashr(ShiftAmount) + 1;
6330       } else {
6331         // 'ashr C, x' produces [C >> (Width-1), C]
6332         Lower = C->ashr(ShiftAmount);
6333         Upper = *C + 1;
6334       }
6335     }
6336     break;
6337 
6338   case Instruction::LShr:
6339     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6340       // 'lshr x, C' produces [0, UINT_MAX >> C].
6341       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
6342     } else if (match(BO.getOperand(0), m_APInt(C))) {
6343       // 'lshr C, x' produces [C >> (Width-1), C].
6344       unsigned ShiftAmount = Width - 1;
6345       if (!C->isNullValue() && IIQ.isExact(&BO))
6346         ShiftAmount = C->countTrailingZeros();
6347       Lower = C->lshr(ShiftAmount);
6348       Upper = *C + 1;
6349     }
6350     break;
6351 
6352   case Instruction::Shl:
6353     if (match(BO.getOperand(0), m_APInt(C))) {
6354       if (IIQ.hasNoUnsignedWrap(&BO)) {
6355         // 'shl nuw C, x' produces [C, C << CLZ(C)]
6356         Lower = *C;
6357         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6358       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6359         if (C->isNegative()) {
6360           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6361           unsigned ShiftAmount = C->countLeadingOnes() - 1;
6362           Lower = C->shl(ShiftAmount);
6363           Upper = *C + 1;
6364         } else {
6365           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6366           unsigned ShiftAmount = C->countLeadingZeros() - 1;
6367           Lower = *C;
6368           Upper = C->shl(ShiftAmount) + 1;
6369         }
6370       }
6371     }
6372     break;
6373 
6374   case Instruction::SDiv:
6375     if (match(BO.getOperand(1), m_APInt(C))) {
6376       APInt IntMin = APInt::getSignedMinValue(Width);
6377       APInt IntMax = APInt::getSignedMaxValue(Width);
6378       if (C->isAllOnesValue()) {
6379         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6380         //    where C != -1 and C != 0 and C != 1
6381         Lower = IntMin + 1;
6382         Upper = IntMax + 1;
6383       } else if (C->countLeadingZeros() < Width - 1) {
6384         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6385         //    where C != -1 and C != 0 and C != 1
6386         Lower = IntMin.sdiv(*C);
6387         Upper = IntMax.sdiv(*C);
6388         if (Lower.sgt(Upper))
6389           std::swap(Lower, Upper);
6390         Upper = Upper + 1;
6391         assert(Upper != Lower && "Upper part of range has wrapped!");
6392       }
6393     } else if (match(BO.getOperand(0), m_APInt(C))) {
6394       if (C->isMinSignedValue()) {
6395         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6396         Lower = *C;
6397         Upper = Lower.lshr(1) + 1;
6398       } else {
6399         // 'sdiv C, x' produces [-|C|, |C|].
6400         Upper = C->abs() + 1;
6401         Lower = (-Upper) + 1;
6402       }
6403     }
6404     break;
6405 
6406   case Instruction::UDiv:
6407     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
6408       // 'udiv x, C' produces [0, UINT_MAX / C].
6409       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
6410     } else if (match(BO.getOperand(0), m_APInt(C))) {
6411       // 'udiv C, x' produces [0, C].
6412       Upper = *C + 1;
6413     }
6414     break;
6415 
6416   case Instruction::SRem:
6417     if (match(BO.getOperand(1), m_APInt(C))) {
6418       // 'srem x, C' produces (-|C|, |C|).
6419       Upper = C->abs();
6420       Lower = (-Upper) + 1;
6421     }
6422     break;
6423 
6424   case Instruction::URem:
6425     if (match(BO.getOperand(1), m_APInt(C)))
6426       // 'urem x, C' produces [0, C).
6427       Upper = *C;
6428     break;
6429 
6430   default:
6431     break;
6432   }
6433 }
6434 
6435 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
6436                                   APInt &Upper) {
6437   unsigned Width = Lower.getBitWidth();
6438   const APInt *C;
6439   switch (II.getIntrinsicID()) {
6440   case Intrinsic::ctpop:
6441   case Intrinsic::ctlz:
6442   case Intrinsic::cttz:
6443     // Maximum of set/clear bits is the bit width.
6444     assert(Lower == 0 && "Expected lower bound to be zero");
6445     Upper = Width + 1;
6446     break;
6447   case Intrinsic::uadd_sat:
6448     // uadd.sat(x, C) produces [C, UINT_MAX].
6449     if (match(II.getOperand(0), m_APInt(C)) ||
6450         match(II.getOperand(1), m_APInt(C)))
6451       Lower = *C;
6452     break;
6453   case Intrinsic::sadd_sat:
6454     if (match(II.getOperand(0), m_APInt(C)) ||
6455         match(II.getOperand(1), m_APInt(C))) {
6456       if (C->isNegative()) {
6457         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
6458         Lower = APInt::getSignedMinValue(Width);
6459         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6460       } else {
6461         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
6462         Lower = APInt::getSignedMinValue(Width) + *C;
6463         Upper = APInt::getSignedMaxValue(Width) + 1;
6464       }
6465     }
6466     break;
6467   case Intrinsic::usub_sat:
6468     // usub.sat(C, x) produces [0, C].
6469     if (match(II.getOperand(0), m_APInt(C)))
6470       Upper = *C + 1;
6471     // usub.sat(x, C) produces [0, UINT_MAX - C].
6472     else if (match(II.getOperand(1), m_APInt(C)))
6473       Upper = APInt::getMaxValue(Width) - *C + 1;
6474     break;
6475   case Intrinsic::ssub_sat:
6476     if (match(II.getOperand(0), m_APInt(C))) {
6477       if (C->isNegative()) {
6478         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
6479         Lower = APInt::getSignedMinValue(Width);
6480         Upper = *C - APInt::getSignedMinValue(Width) + 1;
6481       } else {
6482         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
6483         Lower = *C - APInt::getSignedMaxValue(Width);
6484         Upper = APInt::getSignedMaxValue(Width) + 1;
6485       }
6486     } else if (match(II.getOperand(1), m_APInt(C))) {
6487       if (C->isNegative()) {
6488         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
6489         Lower = APInt::getSignedMinValue(Width) - *C;
6490         Upper = APInt::getSignedMaxValue(Width) + 1;
6491       } else {
6492         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
6493         Lower = APInt::getSignedMinValue(Width);
6494         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
6495       }
6496     }
6497     break;
6498   case Intrinsic::umin:
6499   case Intrinsic::umax:
6500   case Intrinsic::smin:
6501   case Intrinsic::smax:
6502     if (!match(II.getOperand(0), m_APInt(C)) &&
6503         !match(II.getOperand(1), m_APInt(C)))
6504       break;
6505 
6506     switch (II.getIntrinsicID()) {
6507     case Intrinsic::umin:
6508       Upper = *C + 1;
6509       break;
6510     case Intrinsic::umax:
6511       Lower = *C;
6512       break;
6513     case Intrinsic::smin:
6514       Lower = APInt::getSignedMinValue(Width);
6515       Upper = *C + 1;
6516       break;
6517     case Intrinsic::smax:
6518       Lower = *C;
6519       Upper = APInt::getSignedMaxValue(Width) + 1;
6520       break;
6521     default:
6522       llvm_unreachable("Must be min/max intrinsic");
6523     }
6524     break;
6525   case Intrinsic::abs:
6526     // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
6527     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6528     if (match(II.getOperand(1), m_One()))
6529       Upper = APInt::getSignedMaxValue(Width) + 1;
6530     else
6531       Upper = APInt::getSignedMinValue(Width) + 1;
6532     break;
6533   default:
6534     break;
6535   }
6536 }
6537 
6538 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
6539                                       APInt &Upper, const InstrInfoQuery &IIQ) {
6540   const Value *LHS = nullptr, *RHS = nullptr;
6541   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
6542   if (R.Flavor == SPF_UNKNOWN)
6543     return;
6544 
6545   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
6546 
6547   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
6548     // If the negation part of the abs (in RHS) has the NSW flag,
6549     // then the result of abs(X) is [0..SIGNED_MAX],
6550     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
6551     Lower = APInt::getNullValue(BitWidth);
6552     if (match(RHS, m_Neg(m_Specific(LHS))) &&
6553         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
6554       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6555     else
6556       Upper = APInt::getSignedMinValue(BitWidth) + 1;
6557     return;
6558   }
6559 
6560   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
6561     // The result of -abs(X) is <= 0.
6562     Lower = APInt::getSignedMinValue(BitWidth);
6563     Upper = APInt(BitWidth, 1);
6564     return;
6565   }
6566 
6567   const APInt *C;
6568   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
6569     return;
6570 
6571   switch (R.Flavor) {
6572     case SPF_UMIN:
6573       Upper = *C + 1;
6574       break;
6575     case SPF_UMAX:
6576       Lower = *C;
6577       break;
6578     case SPF_SMIN:
6579       Lower = APInt::getSignedMinValue(BitWidth);
6580       Upper = *C + 1;
6581       break;
6582     case SPF_SMAX:
6583       Lower = *C;
6584       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
6585       break;
6586     default:
6587       break;
6588   }
6589 }
6590 
6591 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
6592                                          AssumptionCache *AC,
6593                                          const Instruction *CtxI,
6594                                          unsigned Depth) {
6595   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
6596 
6597   if (Depth == MaxAnalysisRecursionDepth)
6598     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
6599 
6600   const APInt *C;
6601   if (match(V, m_APInt(C)))
6602     return ConstantRange(*C);
6603 
6604   InstrInfoQuery IIQ(UseInstrInfo);
6605   unsigned BitWidth = V->getType()->getScalarSizeInBits();
6606   APInt Lower = APInt(BitWidth, 0);
6607   APInt Upper = APInt(BitWidth, 0);
6608   if (auto *BO = dyn_cast<BinaryOperator>(V))
6609     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
6610   else if (auto *II = dyn_cast<IntrinsicInst>(V))
6611     setLimitsForIntrinsic(*II, Lower, Upper);
6612   else if (auto *SI = dyn_cast<SelectInst>(V))
6613     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
6614 
6615   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
6616 
6617   if (auto *I = dyn_cast<Instruction>(V))
6618     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
6619       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
6620 
6621   if (CtxI && AC) {
6622     // Try to restrict the range based on information from assumptions.
6623     for (auto &AssumeVH : AC->assumptionsFor(V)) {
6624       if (!AssumeVH)
6625         continue;
6626       CallInst *I = cast<CallInst>(AssumeVH);
6627       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
6628              "Got assumption for the wrong function!");
6629       assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
6630              "must be an assume intrinsic");
6631 
6632       if (!isValidAssumeForContext(I, CtxI, nullptr))
6633         continue;
6634       Value *Arg = I->getArgOperand(0);
6635       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
6636       // Currently we just use information from comparisons.
6637       if (!Cmp || Cmp->getOperand(0) != V)
6638         continue;
6639       ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
6640                                                AC, I, Depth + 1);
6641       CR = CR.intersectWith(
6642           ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
6643     }
6644   }
6645 
6646   return CR;
6647 }
6648 
6649 static Optional<int64_t>
6650 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
6651   // Skip over the first indices.
6652   gep_type_iterator GTI = gep_type_begin(GEP);
6653   for (unsigned i = 1; i != Idx; ++i, ++GTI)
6654     /*skip along*/;
6655 
6656   // Compute the offset implied by the rest of the indices.
6657   int64_t Offset = 0;
6658   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
6659     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
6660     if (!OpC)
6661       return None;
6662     if (OpC->isZero())
6663       continue; // No offset.
6664 
6665     // Handle struct indices, which add their field offset to the pointer.
6666     if (StructType *STy = GTI.getStructTypeOrNull()) {
6667       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
6668       continue;
6669     }
6670 
6671     // Otherwise, we have a sequential type like an array or fixed-length
6672     // vector. Multiply the index by the ElementSize.
6673     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
6674     if (Size.isScalable())
6675       return None;
6676     Offset += Size.getFixedSize() * OpC->getSExtValue();
6677   }
6678 
6679   return Offset;
6680 }
6681 
6682 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
6683                                         const DataLayout &DL) {
6684   Ptr1 = Ptr1->stripPointerCasts();
6685   Ptr2 = Ptr2->stripPointerCasts();
6686 
6687   // Handle the trivial case first.
6688   if (Ptr1 == Ptr2) {
6689     return 0;
6690   }
6691 
6692   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
6693   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
6694 
6695   // If one pointer is a GEP see if the GEP is a constant offset from the base,
6696   // as in "P" and "gep P, 1".
6697   // Also do this iteratively to handle the the following case:
6698   //   Ptr_t1 = GEP Ptr1, c1
6699   //   Ptr_t2 = GEP Ptr_t1, c2
6700   //   Ptr2 = GEP Ptr_t2, c3
6701   // where we will return c1+c2+c3.
6702   // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
6703   // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
6704   // are the same, and return the difference between offsets.
6705   auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
6706                                  const Value *Ptr) -> Optional<int64_t> {
6707     const GEPOperator *GEP_T = GEP;
6708     int64_t OffsetVal = 0;
6709     bool HasSameBase = false;
6710     while (GEP_T) {
6711       auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
6712       if (!Offset)
6713         return None;
6714       OffsetVal += *Offset;
6715       auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
6716       if (Op0 == Ptr) {
6717         HasSameBase = true;
6718         break;
6719       }
6720       GEP_T = dyn_cast<GEPOperator>(Op0);
6721     }
6722     if (!HasSameBase)
6723       return None;
6724     return OffsetVal;
6725   };
6726 
6727   if (GEP1) {
6728     auto Offset = getOffsetFromBase(GEP1, Ptr2);
6729     if (Offset)
6730       return -*Offset;
6731   }
6732   if (GEP2) {
6733     auto Offset = getOffsetFromBase(GEP2, Ptr1);
6734     if (Offset)
6735       return Offset;
6736   }
6737 
6738   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
6739   // base.  After that base, they may have some number of common (and
6740   // potentially variable) indices.  After that they handle some constant
6741   // offset, which determines their offset from each other.  At this point, we
6742   // handle no other case.
6743   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
6744     return None;
6745 
6746   // Skip any common indices and track the GEP types.
6747   unsigned Idx = 1;
6748   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
6749     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
6750       break;
6751 
6752   auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
6753   auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
6754   if (!Offset1 || !Offset2)
6755     return None;
6756   return *Offset2 - *Offset1;
6757 }
6758